xref: /linux/drivers/infiniband/hw/ionic/ionic_datapath.c (revision b83c62055b6faabb444b2f8f3355420927cc39fd)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */

#include <linux/module.h>
#include <linux/printk.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>

#include "ionic_fw.h"
#include "ionic_ibdev.h"

#define IONIC_OP(version, opname) \
	((version) < 2 ? IONIC_V1_OP_##opname : IONIC_V2_OP_##opname)
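
/*
 * For example, IONIC_OP(1, SEND) expands to IONIC_V1_OP_SEND and
 * IONIC_OP(2, SEND) expands to IONIC_V2_OP_SEND: the opcode namespace
 * changed in rdma_version 2, so the wqe builders below select opcodes
 * based on dev->lif_cfg.rdma_version at post time.
 */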

static bool ionic_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
			   struct ionic_v1_cqe **cqe)
{
	struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);

	if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
		return false;

	/* Prevent out-of-order reads of the CQE */
	dma_rmb();

	*cqe = qcqe;

	return true;
}
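
/*
 * Usage sketch (see ionic_poll_vcq_cq below): with cq->lock held, the
 * poller consumes entries until the color of the entry at the producer
 * index no longer matches cq->color.  The color bit alternates on each
 * ring wrap, so a stale entry from the previous pass never looks new:
 *
 *	while (ionic_next_cqe(dev, cq, &cqe)) {
 *		... process cqe ...
 *		ionic_queue_produce(&cq->q);
 *		cq->color = ionic_color_wrap(cq->q.prod, cq->color);
 *	}
 */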

static int ionic_flush_recv(struct ionic_qp *qp, struct ib_wc *wc)
{
	struct ionic_rq_meta *meta;
	struct ionic_v1_wqe *wqe;

	if (!qp->rq_flush)
		return 0;

	if (ionic_queue_empty(&qp->rq))
		return 0;

	wqe = ionic_queue_at_cons(&qp->rq);

	/* wqe_id must be a valid queue index */
	if (unlikely(wqe->base.wqe_id >> qp->rq.depth_log2)) {
		ibdev_warn(qp->ibqp.device,
			   "flush qp %u recv index %llu invalid\n",
			   qp->qpid, (unsigned long long)wqe->base.wqe_id);
		return -EIO;
	}

	/* wqe_id must indicate a request that is outstanding */
	meta = &qp->rq_meta[wqe->base.wqe_id];
	if (unlikely(meta->next != IONIC_META_POSTED)) {
		ibdev_warn(qp->ibqp.device,
			   "flush qp %u recv index %llu not posted\n",
			   qp->qpid, (unsigned long long)wqe->base.wqe_id);
		return -EIO;
	}

	ionic_queue_consume(&qp->rq);

	memset(wc, 0, sizeof(*wc));

	wc->status = IB_WC_WR_FLUSH_ERR;
	wc->wr_id = meta->wrid;
	wc->qp = &qp->ibqp;

	meta->next = qp->rq_meta_head;
	qp->rq_meta_head = meta;

	return 1;
}
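
/*
 * The poll and flush helpers in this file share a return convention:
 * 1 means one work completion was written to *wc, 0 means there is
 * nothing (more) to report, and a negative errno means the queue state
 * is inconsistent.  The *_many() wrappers aggregate with
 * "npolled ?: rc": if any completions were produced, report the count
 * and leave the error (if any) to surface on the next call.
 */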

static int ionic_flush_recv_many(struct ionic_qp *qp,
				 struct ib_wc *wc, int nwc)
{
	int rc = 0, npolled = 0;

	while (npolled < nwc) {
		rc = ionic_flush_recv(qp, wc + npolled);
		if (rc <= 0)
			break;

		npolled += rc;
	}

	return npolled ?: rc;
}

static int ionic_flush_send(struct ionic_qp *qp, struct ib_wc *wc)
{
	struct ionic_sq_meta *meta;

	if (!qp->sq_flush)
		return 0;

	if (ionic_queue_empty(&qp->sq))
		return 0;

	meta = &qp->sq_meta[qp->sq.cons];

	ionic_queue_consume(&qp->sq);

	memset(wc, 0, sizeof(*wc));

	wc->status = IB_WC_WR_FLUSH_ERR;
	wc->wr_id = meta->wrid;
	wc->qp = &qp->ibqp;

	return 1;
}

static int ionic_flush_send_many(struct ionic_qp *qp,
				 struct ib_wc *wc, int nwc)
{
	int rc = 0, npolled = 0;

	while (npolled < nwc) {
		rc = ionic_flush_send(qp, wc + npolled);
		if (rc <= 0)
			break;

		npolled += rc;
	}

	return npolled ?: rc;
}

static int ionic_poll_recv(struct ionic_ibdev *dev, struct ionic_cq *cq,
			   struct ionic_qp *cqe_qp, struct ionic_v1_cqe *cqe,
			   struct ib_wc *wc)
{
	struct ionic_qp *qp = NULL;
	struct ionic_rq_meta *meta;
	u32 src_qpn, st_len;
	u16 vlan_tag;
	u8 op;

	if (cqe_qp->rq_flush)
		return 0;

	qp = cqe_qp;

	st_len = be32_to_cpu(cqe->status_length);

	/* ignore wqe_id in case of flush error */
	if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
		cqe_qp->rq_flush = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);

		/* posted recvs (if any) flushed by ionic_flush_recv */
		return 0;
	}

	/* there had better be something in the recv queue to complete */
	if (ionic_queue_empty(&qp->rq)) {
		ibdev_warn(&dev->ibdev, "qp %u is empty\n", qp->qpid);
		return -EIO;
	}

	/* wqe_id must be a valid queue index */
	if (unlikely(cqe->recv.wqe_id >> qp->rq.depth_log2)) {
		ibdev_warn(&dev->ibdev,
			   "qp %u recv index %llu invalid\n",
			   qp->qpid, (unsigned long long)cqe->recv.wqe_id);
		return -EIO;
	}

	/* wqe_id must indicate a request that is outstanding */
	meta = &qp->rq_meta[cqe->recv.wqe_id];
	if (unlikely(meta->next != IONIC_META_POSTED)) {
		ibdev_warn(&dev->ibdev,
			   "qp %u recv index %llu not posted\n",
			   qp->qpid, (unsigned long long)cqe->recv.wqe_id);
		return -EIO;
	}

	meta->next = qp->rq_meta_head;
	qp->rq_meta_head = meta;

	memset(wc, 0, sizeof(*wc));

	wc->wr_id = meta->wrid;

	wc->qp = &cqe_qp->ibqp;

	if (ionic_v1_cqe_error(cqe)) {
		wc->vendor_err = st_len;
		wc->status = ionic_to_ib_status(st_len);

		cqe_qp->rq_flush = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);

		ibdev_warn(&dev->ibdev,
			   "qp %u recv cqe with error\n", qp->qpid);
		print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
			       cqe, BIT(cq->q.stride_log2), true);
		goto out;
	}

	wc->vendor_err = 0;
	wc->status = IB_WC_SUCCESS;

	src_qpn = be32_to_cpu(cqe->recv.src_qpn_op);
	op = src_qpn >> IONIC_V1_CQE_RECV_OP_SHIFT;

	src_qpn &= IONIC_V1_CQE_RECV_QPN_MASK;
	op &= IONIC_V1_CQE_RECV_OP_MASK;

	wc->opcode = IB_WC_RECV;
	switch (op) {
	case IONIC_V1_CQE_RECV_OP_RDMA_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags |= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
		break;
	case IONIC_V1_CQE_RECV_OP_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
		break;
	case IONIC_V1_CQE_RECV_OP_SEND_INV:
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->recv.imm_data_rkey);
		break;
	}

	wc->byte_len = st_len;
	wc->src_qp = src_qpn;

	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_GSI) {
		wc->wc_flags |= IB_WC_GRH | IB_WC_WITH_SMAC;
		ether_addr_copy(wc->smac, cqe->recv.src_mac);

		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
		if (ionic_v1_cqe_recv_is_ipv4(cqe))
			wc->network_hdr_type = RDMA_NETWORK_IPV4;
		else
			wc->network_hdr_type = RDMA_NETWORK_IPV6;

		if (ionic_v1_cqe_recv_is_vlan(cqe))
			wc->wc_flags |= IB_WC_WITH_VLAN;

		/* vlan_tag in cqe will be valid from dpath even if no vlan */
		vlan_tag = be16_to_cpu(cqe->recv.vlan_tag);
		wc->vlan_id = vlan_tag & 0xfff; /* 802.1q VID */
		wc->sl = vlan_tag >> VLAN_PRIO_SHIFT; /* 802.1q PCP */
	}

	wc->pkey_index = 0;
	wc->port_num = 1;

out:
	ionic_queue_consume(&qp->rq);

	return 1;
}

static bool ionic_peek_send(struct ionic_qp *qp)
{
	struct ionic_sq_meta *meta;

	if (qp->sq_flush)
		return false;

	/* completed all send queue requests */
	if (ionic_queue_empty(&qp->sq))
		return false;

	meta = &qp->sq_meta[qp->sq.cons];

	/* waiting for remote completion */
	if (meta->remote && meta->seq == qp->sq_msn_cons)
		return false;

	/* waiting for local completion */
	if (!meta->remote && !meta->local_comp)
		return false;

	return true;
}
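
/*
 * Send completion tracking is split by work request type.  Requests
 * that target the remote peer are retired by message sequence number:
 * while the entry at the consumer index still has
 * meta->seq == sq_msn_cons, its completion has not arrived.  Local
 * operations are instead retired by the per-entry local_comp flag set
 * in ionic_comp_npg().  See ionic_prep_base() for where meta->remote
 * and meta->seq are assigned.
 */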

static int ionic_poll_send(struct ionic_ibdev *dev, struct ionic_cq *cq,
			   struct ionic_qp *qp, struct ib_wc *wc)
{
	struct ionic_sq_meta *meta;

	if (qp->sq_flush)
		return 0;

	do {
		/* completed all send queue requests */
		if (ionic_queue_empty(&qp->sq))
			goto out_empty;

		meta = &qp->sq_meta[qp->sq.cons];

		/* waiting for remote completion */
		if (meta->remote && meta->seq == qp->sq_msn_cons)
			goto out_empty;

		/* waiting for local completion */
		if (!meta->remote && !meta->local_comp)
			goto out_empty;

		ionic_queue_consume(&qp->sq);

		/* produce wc only if signaled or error status */
	} while (!meta->signal && meta->ibsts == IB_WC_SUCCESS);

	memset(wc, 0, sizeof(*wc));

	wc->status = meta->ibsts;
	wc->wr_id = meta->wrid;
	wc->qp = &qp->ibqp;

	if (meta->ibsts == IB_WC_SUCCESS) {
		wc->byte_len = meta->len;
		wc->opcode = meta->ibop;
	} else {
		wc->vendor_err = meta->len;

		qp->sq_flush = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
	}

	return 1;

out_empty:
	if (qp->sq_flush_rcvd) {
		qp->sq_flush = true;
		cq->flush = true;
		list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
	}
	return 0;
}
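
/*
 * Note the out_empty path: a flush hint recorded by ionic_comp_npg()
 * (sq_flush_rcvd) only takes effect here, once no further completions
 * can be polled normally.  That ordering lets all indicated normal or
 * error completions drain before the queue transitions to the flush
 * state and the remaining entries complete with IB_WC_WR_FLUSH_ERR.
 */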

static int ionic_poll_send_many(struct ionic_ibdev *dev, struct ionic_cq *cq,
				struct ionic_qp *qp, struct ib_wc *wc, int nwc)
{
	int rc = 0, npolled = 0;

	while (npolled < nwc) {
		rc = ionic_poll_send(dev, cq, qp, wc + npolled);
		if (rc <= 0)
			break;

		npolled += rc;
	}

	return npolled ?: rc;
}

static int ionic_validate_cons(u16 prod, u16 cons,
			       u16 comp, u16 mask)
{
	if (((prod - cons) & mask) <= ((comp - cons) & mask))
		return -EIO;

	return 0;
}
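
/*
 * The subtraction-and-mask form handles ring wraparound: a completion
 * index is valid only if it lies strictly between the consumer and the
 * producer.  For example, with prod = 5, cons = 2, mask = 0xffff:
 * comp = 3 gives (5 - 2) & mask = 3 > (3 - 2) & mask = 1, so it is
 * accepted; comp = 6 gives 3 <= 4 and is rejected as at or beyond the
 * producer, as is any comp that wrapped behind the consumer.
 */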

static int ionic_comp_msn(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
{
	struct ionic_sq_meta *meta;
	u16 cqe_seq, cqe_idx;
	int rc;

	if (qp->sq_flush)
		return 0;

	cqe_seq = be32_to_cpu(cqe->send.msg_msn) & qp->sq.mask;

	rc = ionic_validate_cons(qp->sq_msn_prod,
				 qp->sq_msn_cons,
				 cqe_seq - 1,
				 qp->sq.mask);
	if (rc) {
		ibdev_warn(qp->ibqp.device,
			   "qp %u bad msn %#x seq %u for prod %u cons %u\n",
			   qp->qpid, be32_to_cpu(cqe->send.msg_msn),
			   cqe_seq, qp->sq_msn_prod, qp->sq_msn_cons);
		return rc;
	}

	qp->sq_msn_cons = cqe_seq;

	if (ionic_v1_cqe_error(cqe)) {
		cqe_idx = qp->sq_msn_idx[(cqe_seq - 1) & qp->sq.mask];

		meta = &qp->sq_meta[cqe_idx];
		meta->len = be32_to_cpu(cqe->status_length);
		meta->ibsts = ionic_to_ib_status(meta->len);

		ibdev_warn(qp->ibqp.device,
			   "qp %u msn cqe with error\n", qp->qpid);
		print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
			       cqe, sizeof(*cqe), true);
	}

	return 0;
}
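
/*
 * A single MSN completion retires every remote request up to that
 * sequence number: ionic_poll_send() keeps consuming entries while
 * meta->seq != sq_msn_cons, so unsignaled requests are coalesced into
 * the completion of a later signaled one.  On error, sq_msn_idx maps
 * the failing MSN back to its send queue slot so the status lands on
 * the right meta entry.
 */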

static int ionic_comp_npg(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
{
	struct ionic_sq_meta *meta;
	u16 cqe_idx;
	u32 st_len;

	if (qp->sq_flush)
		return 0;

	st_len = be32_to_cpu(cqe->status_length);

	if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
		/*
		 * A flush cqe does not consume a wqe on the device, and
		 * there may be no such work request posted.
		 *
		 * The driver should begin flushing after the last indicated
		 * normal or error completion.  Here, only set a hint that a
		 * flush was indicated.  In poll_send, if nothing more can be
		 * polled normally, then begin flushing.
		 */
		qp->sq_flush_rcvd = true;
		return 0;
	}

	cqe_idx = cqe->send.npg_wqe_id & qp->sq.mask;
	meta = &qp->sq_meta[cqe_idx];
	meta->local_comp = true;

	if (ionic_v1_cqe_error(cqe)) {
		meta->len = st_len;
		meta->ibsts = ionic_to_ib_status(st_len);
		meta->remote = false;
		ibdev_warn(qp->ibqp.device,
			   "qp %u npg cqe with error\n", qp->qpid);
		print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
			       cqe, sizeof(*cqe), true);
	}

	return 0;
}

static void ionic_reserve_sync_cq(struct ionic_ibdev *dev, struct ionic_cq *cq)
{
	if (!ionic_queue_empty(&cq->q)) {
		cq->credit += ionic_queue_length(&cq->q);
		cq->q.cons = cq->q.prod;

		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
				 ionic_queue_dbell_val(&cq->q));
	}
}

static void ionic_reserve_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
			     int spend)
{
	cq->credit -= spend;

	if (cq->credit <= 0)
		ionic_reserve_sync_cq(dev, cq);
}
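
/*
 * The credit counter tracks how many cq entries the device may still
 * produce before the driver must acknowledge the ones it has consumed.
 * Syncing refills credit by the number of entries retired since the
 * last sync, advances the consumer index to the producer index, and
 * rings the cq doorbell so the device can reuse those entries.
 */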

static int ionic_poll_vcq_cq(struct ionic_ibdev *dev,
			     struct ionic_cq *cq,
			     int nwc, struct ib_wc *wc)
{
	struct ionic_qp *qp, *qp_next;
	struct ionic_v1_cqe *cqe;
	int rc = 0, npolled = 0;
	unsigned long irqflags;
	u32 qtf, qid;
	bool peek;
	u8 type;

	if (nwc < 1)
		return 0;

	spin_lock_irqsave(&cq->lock, irqflags);

	/* poll already indicated work completions for send queue */
	list_for_each_entry_safe(qp, qp_next, &cq->poll_sq, cq_poll_sq) {
		if (npolled == nwc)
			goto out;

		spin_lock(&qp->sq_lock);
		rc = ionic_poll_send_many(dev, cq, qp, wc + npolled,
					  nwc - npolled);
		spin_unlock(&qp->sq_lock);

		if (rc > 0)
			npolled += rc;

		if (npolled < nwc)
			list_del_init(&qp->cq_poll_sq);
	}

	/* poll for more work completions */
	while (likely(ionic_next_cqe(dev, cq, &cqe))) {
		if (npolled == nwc)
			goto out;

		qtf = ionic_v1_cqe_qtf(cqe);
		qid = ionic_v1_cqe_qtf_qid(qtf);
		type = ionic_v1_cqe_qtf_type(qtf);

		/*
		 * Safe to access the QP without an additional reference here:
		 * 1. cq->lock is held throughout.
		 * 2. ionic_destroy_qp() acquires the same cq->lock before
		 *    cleanup.
		 * 3. The QP is removed from qp_tbl before any cleanup begins.
		 * This ensures no concurrent access between polling and
		 * destruction.
		 */
		qp = xa_load(&dev->qp_tbl, qid);
		if (unlikely(!qp)) {
			ibdev_dbg(&dev->ibdev, "missing qp for qid %u\n", qid);
			goto cq_next;
		}

		switch (type) {
		case IONIC_V1_CQE_TYPE_RECV:
			spin_lock(&qp->rq_lock);
			rc = ionic_poll_recv(dev, cq, qp, cqe, wc + npolled);
			spin_unlock(&qp->rq_lock);

			if (rc < 0)
				goto out;

			npolled += rc;

			break;

		case IONIC_V1_CQE_TYPE_SEND_MSN:
			spin_lock(&qp->sq_lock);
			rc = ionic_comp_msn(qp, cqe);
			if (!rc) {
				rc = ionic_poll_send_many(dev, cq, qp,
							  wc + npolled,
							  nwc - npolled);
				peek = ionic_peek_send(qp);
			}
			spin_unlock(&qp->sq_lock);

			if (rc < 0)
				goto out;

			npolled += rc;

			if (peek)
				list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
			break;

		case IONIC_V1_CQE_TYPE_SEND_NPG:
			spin_lock(&qp->sq_lock);
			rc = ionic_comp_npg(qp, cqe);
			if (!rc) {
				rc = ionic_poll_send_many(dev, cq, qp,
							  wc + npolled,
							  nwc - npolled);
				peek = ionic_peek_send(qp);
			}
			spin_unlock(&qp->sq_lock);

			if (rc < 0)
				goto out;

			npolled += rc;

			if (peek)
				list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
			break;

		default:
			ibdev_warn(&dev->ibdev,
				   "unexpected cqe type %u\n", type);
			rc = -EIO;
			goto out;
		}

cq_next:
		ionic_queue_produce(&cq->q);
		cq->color = ionic_color_wrap(cq->q.prod, cq->color);
	}

	/* lastly, flush send and recv queues */
	if (likely(!cq->flush))
		goto out;

	cq->flush = false;

	list_for_each_entry_safe(qp, qp_next, &cq->flush_sq, cq_flush_sq) {
		if (npolled == nwc)
			goto out;

		spin_lock(&qp->sq_lock);
		rc = ionic_flush_send_many(qp, wc + npolled, nwc - npolled);
		spin_unlock(&qp->sq_lock);

		if (rc > 0)
			npolled += rc;

		if (npolled < nwc)
			list_del_init(&qp->cq_flush_sq);
		else
			cq->flush = true;
	}

	list_for_each_entry_safe(qp, qp_next, &cq->flush_rq, cq_flush_rq) {
		if (npolled == nwc)
			goto out;

		spin_lock(&qp->rq_lock);
		rc = ionic_flush_recv_many(qp, wc + npolled, nwc - npolled);
		spin_unlock(&qp->rq_lock);

		if (rc > 0)
			npolled += rc;

		if (npolled < nwc)
			list_del_init(&qp->cq_flush_rq);
		else
			cq->flush = true;
	}

out:
	/* in case credit was depleted (more work posted than cq depth) */
	if (cq->credit <= 0)
		ionic_reserve_sync_cq(dev, cq);

	spin_unlock_irqrestore(&cq->lock, irqflags);

	return npolled ?: rc;
}

int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
	struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
	int rc_tmp, rc = 0, npolled = 0;
	int cq_i, cq_x, cq_ix;

	cq_x = vcq->poll_idx;
	vcq->poll_idx ^= dev->lif_cfg.udma_count - 1;

	for (cq_i = 0; npolled < nwc && cq_i < dev->lif_cfg.udma_count; ++cq_i) {
		cq_ix = cq_i ^ cq_x;

		if (!(vcq->udma_mask & BIT(cq_ix)))
			continue;

		rc_tmp = ionic_poll_vcq_cq(dev, &vcq->cq[cq_ix],
					   nwc - npolled,
					   wc + npolled);

		if (rc_tmp >= 0)
			npolled += rc_tmp;
		else if (!rc)
			rc = rc_tmp;
	}

	return npolled ?: rc;
}
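
/*
 * The poll_idx XOR alternates which underlying cq is visited first so
 * neither udma pipeline can starve the other.  With udma_count == 2,
 * poll_idx toggles between 0 and 1 on every call, and
 * cq_ix = cq_i ^ cq_x walks the pair in the order (0, 1) on one call
 * and (1, 0) on the next.
 */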

static int ionic_req_notify_vcq_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	u64 dbell_val = cq->q.dbell;

	if (flags & IB_CQ_SOLICITED) {
		cq->arm_sol_prod = ionic_queue_next(&cq->q, cq->arm_sol_prod);
		dbell_val |= cq->arm_sol_prod | IONIC_CQ_RING_SOL;
	} else {
		cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
		dbell_val |= cq->arm_any_prod | IONIC_CQ_RING_ARM;
	}

	ionic_reserve_sync_cq(dev, cq);

	ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype, dbell_val);

	/*
	 * IB_CQ_REPORT_MISSED_EVENTS:
	 *
	 * The queue index in ring zero guarantees no missed events.
	 *
	 * Here, we check if the color bit in the next cqe is flipped.  If it
	 * is flipped, then progress can be made by immediately polling the cq.
	 * Still, the cq will be armed, and an event will be generated.  The cq
	 * may be empty when polled after the event, because the next poll
	 * after arming the cq can empty it.
	 */
	return (flags & IB_CQ_REPORT_MISSED_EVENTS) &&
		cq->color == ionic_v1_cqe_color(ionic_queue_at_prod(&cq->q));
}
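
/*
 * A verbs consumer typically re-polls when missed events are reported
 * (a sketch of standard ib_verbs usage, not driver code):
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto poll_again;
 */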

int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
	struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
	int rc = 0, cq_i;

	for (cq_i = 0; cq_i < dev->lif_cfg.udma_count; ++cq_i) {
		if (!(vcq->udma_mask & BIT(cq_i)))
			continue;

		if (ionic_req_notify_vcq_cq(dev, &vcq->cq[cq_i], flags))
			rc = 1;
	}

	return rc;
}

static s64 ionic_prep_inline(void *data, u32 max_data,
			     const struct ib_sge *ib_sgl, int num_sge)
{
	static const s64 bit_31 = 1u << 31;
	s64 len = 0, sg_len;
	int sg_i;

	for (sg_i = 0; sg_i < num_sge; ++sg_i) {
		sg_len = ib_sgl[sg_i].length;

		/* sge length zero means 2GB */
		if (unlikely(sg_len == 0))
			sg_len = bit_31;

		/* greater than max inline data is invalid */
		if (unlikely(len + sg_len > max_data))
			return -EINVAL;

		memcpy(data + len, (void *)ib_sgl[sg_i].addr, sg_len);

		len += sg_len;
	}

	return len;
}

static s64 ionic_prep_pld(struct ionic_v1_wqe *wqe,
			  union ionic_v1_pld *pld,
			  int spec, u32 max_sge,
			  const struct ib_sge *ib_sgl,
			  int num_sge)
{
	static const s64 bit_31 = 1u << 31;
	struct ionic_sge *sgl;
	__be32 *spec32 = NULL;
	__be16 *spec16 = NULL;
	s64 len = 0, sg_len;
	int sg_i = 0;

	if (unlikely(num_sge < 0 || (u32)num_sge > max_sge))
		return -EINVAL;

	if (spec && num_sge > IONIC_V1_SPEC_FIRST_SGE) {
		sg_i = IONIC_V1_SPEC_FIRST_SGE;

		if (num_sge > 8) {
			wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC16);
			spec16 = pld->spec16;
		} else {
			wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC32);
			spec32 = pld->spec32;
		}
	}

	sgl = &pld->sgl[sg_i];

	for (sg_i = 0; sg_i < num_sge; ++sg_i) {
		sg_len = ib_sgl[sg_i].length;

		/* sge length zero means 2GB */
		if (unlikely(sg_len == 0))
			sg_len = bit_31;

		/* greater than 2GB data is invalid */
		if (unlikely(len + sg_len > bit_31))
			return -EINVAL;

		sgl[sg_i].va = cpu_to_be64(ib_sgl[sg_i].addr);
		sgl[sg_i].len = cpu_to_be32(sg_len);
		sgl[sg_i].lkey = cpu_to_be32(ib_sgl[sg_i].lkey);

		if (spec32) {
			spec32[sg_i] = sgl[sg_i].len;
		} else if (spec16) {
			if (unlikely(sg_len > U16_MAX))
				return -EINVAL;
			spec16[sg_i] = cpu_to_be16(sg_len);
		}

		len += sg_len;
	}

	return len;
}
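
/*
 * When speculation is enabled and there are more sges than
 * IONIC_V1_SPEC_FIRST_SGE, sge lengths are mirrored into the compact
 * spec32 or spec16 array of the payload union: 32-bit entries for up
 * to 8 sges, 16-bit entries beyond that (each length then limited to
 * U16_MAX), presumably so the device can read the segment sizes
 * without walking the full scatter-gather list.
 */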

static void ionic_prep_base(struct ionic_qp *qp,
			    const struct ib_send_wr *wr,
			    struct ionic_sq_meta *meta,
			    struct ionic_v1_wqe *wqe)
{
	meta->wrid = wr->wr_id;
	meta->ibsts = IB_WC_SUCCESS;
	meta->signal = false;
	meta->local_comp = false;

	wqe->base.wqe_id = qp->sq.prod;

	if (wr->send_flags & IB_SEND_FENCE)
		wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_FENCE);

	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SOL);

	if (qp->sig_all || wr->send_flags & IB_SEND_SIGNALED) {
		wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SIG);
		meta->signal = true;
	}

	meta->seq = qp->sq_msn_prod;
	meta->remote =
		qp->ibqp.qp_type != IB_QPT_UD &&
		qp->ibqp.qp_type != IB_QPT_GSI &&
		!ionic_ibop_is_local(wr->opcode);

	if (meta->remote) {
		qp->sq_msn_idx[meta->seq] = qp->sq.prod;
		qp->sq_msn_prod = ionic_queue_next(&qp->sq, qp->sq_msn_prod);
	}

	ionic_queue_produce(&qp->sq);
}
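
/*
 * ionic_prep_base() must run after the rest of the wqe is filled in:
 * it stamps wqe_id with the current producer index, records the
 * MSN-to-slot mapping for remote work, and only then advances the
 * send queue producer.
 */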

static int ionic_prep_common(struct ionic_qp *qp,
			     const struct ib_send_wr *wr,
			     struct ionic_sq_meta *meta,
			     struct ionic_v1_wqe *wqe)
{
	s64 signed_len;
	u32 mval;

	if (wr->send_flags & IB_SEND_INLINE) {
		wqe->base.num_sge_key = 0;
		wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_INL);
		mval = ionic_v1_send_wqe_max_data(qp->sq.stride_log2, false);
		signed_len = ionic_prep_inline(wqe->common.pld.data, mval,
					       wr->sg_list, wr->num_sge);
	} else {
		wqe->base.num_sge_key = wr->num_sge;
		mval = ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
						 qp->sq_spec,
						 false);
		signed_len = ionic_prep_pld(wqe, &wqe->common.pld,
					    qp->sq_spec, mval,
					    wr->sg_list, wr->num_sge);
	}

	if (unlikely(signed_len < 0))
		return signed_len;

	meta->len = signed_len;
	wqe->common.length = cpu_to_be32(signed_len);

	ionic_prep_base(qp, wr, meta, wqe);

	return 0;
}
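
/*
 * Inline sends copy the payload bytes directly into the wqe (bounded
 * by ionic_v1_send_wqe_max_data for the queue stride) and set no sge
 * count, so no lkey is referenced; otherwise the scatter-gather list
 * is encoded into the payload and num_sge_key carries the sge count.
 */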

static void ionic_prep_sq_wqe(struct ionic_qp *qp, void *wqe)
{
	memset(wqe, 0, 1u << qp->sq.stride_log2);
}

static void ionic_prep_rq_wqe(struct ionic_qp *qp, void *wqe)
{
	memset(wqe, 0, 1u << qp->rq.stride_log2);
}

static int ionic_prep_send(struct ionic_qp *qp,
			   const struct ib_send_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	meta->ibop = IB_WC_SEND;

	switch (wr->opcode) {
	case IB_WR_SEND:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
		wqe->base.imm_data_key = wr->ex.imm_data;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_INV);
		wqe->base.imm_data_key =
			cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}

	return ionic_prep_common(qp, wr, meta, wqe);
}

static int ionic_prep_send_ud(struct ionic_qp *qp,
			      const struct ib_ud_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;
	struct ionic_ah *ah;

	if (unlikely(!wr->ah))
		return -EINVAL;

	ah = to_ionic_ah(wr->ah);

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	wqe->common.send.ah_id = cpu_to_be32(ah->ahid);
	wqe->common.send.dest_qpn = cpu_to_be32(wr->remote_qpn);
	wqe->common.send.dest_qkey = cpu_to_be32(wr->remote_qkey);

	meta->ibop = IB_WC_SEND;

	switch (wr->wr.opcode) {
	case IB_WR_SEND:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
		wqe->base.imm_data_key = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	return ionic_prep_common(qp, &wr->wr, meta, wqe);
}

static int ionic_prep_rdma(struct ionic_qp *qp,
			   const struct ib_rdma_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	meta->ibop = IB_WC_RDMA_WRITE;

	switch (wr->wr.opcode) {
	case IB_WR_RDMA_READ:
		if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
			return -EINVAL;
		meta->ibop = IB_WC_RDMA_READ;
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_READ);
		break;
	case IB_WR_RDMA_WRITE:
		if (wr->wr.send_flags & IB_SEND_SOLICITED)
			return -EINVAL;
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE);
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version,
					RDMA_WRITE_IMM);
		wqe->base.imm_data_key = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	wqe->common.rdma.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
	wqe->common.rdma.remote_va_low = cpu_to_be32(wr->remote_addr);
	wqe->common.rdma.remote_rkey = cpu_to_be32(wr->rkey);

	return ionic_prep_common(qp, &wr->wr, meta, wqe);
}

static int ionic_prep_atomic(struct ionic_qp *qp,
			     const struct ib_atomic_wr *wr)
{
	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
	struct ionic_sq_meta *meta;
	struct ionic_v1_wqe *wqe;

	if (wr->wr.num_sge != 1 || wr->wr.sg_list[0].length != 8)
		return -EINVAL;

	if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
		return -EINVAL;

	meta = &qp->sq_meta[qp->sq.prod];
	wqe = ionic_queue_at_prod(&qp->sq);

	ionic_prep_sq_wqe(qp, wqe);

	meta->ibop = IB_WC_RDMA_WRITE;

	switch (wr->wr.opcode) {
	case IB_WR_ATOMIC_CMP_AND_SWP:
		meta->ibop = IB_WC_COMP_SWAP;
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_CS);
		wqe->atomic.swap_add_high = cpu_to_be32(wr->swap >> 32);
		wqe->atomic.swap_add_low = cpu_to_be32(wr->swap);
		wqe->atomic.compare_high = cpu_to_be32(wr->compare_add >> 32);
		wqe->atomic.compare_low = cpu_to_be32(wr->compare_add);
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		meta->ibop = IB_WC_FETCH_ADD;
		wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_FA);
		wqe->atomic.swap_add_high = cpu_to_be32(wr->compare_add >> 32);
		wqe->atomic.swap_add_low = cpu_to_be32(wr->compare_add);
		break;
	default:
		return -EINVAL;
	}

	wqe->atomic.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
	wqe->atomic.remote_va_low = cpu_to_be32(wr->remote_addr);
	wqe->atomic.remote_rkey = cpu_to_be32(wr->rkey);

	wqe->base.num_sge_key = 1;
	wqe->atomic.sge.va = cpu_to_be64(wr->wr.sg_list[0].addr);
	wqe->atomic.sge.len = cpu_to_be32(8);
	wqe->atomic.sge.lkey = cpu_to_be32(wr->wr.sg_list[0].lkey);

	return ionic_prep_common(qp, &wr->wr, meta, wqe);
}
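
/*
 * Atomics are constrained to exactly one 8-byte sge (checked above),
 * matching the 64-bit operand size of compare-and-swap and
 * fetch-and-add; the operands and remote va are split into high/low
 * 32-bit halves in the wqe.
 */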
1050*b83c6205SAbhijit Gangurde 
1051*b83c6205SAbhijit Gangurde static int ionic_prep_inv(struct ionic_qp *qp,
1052*b83c6205SAbhijit Gangurde 			  const struct ib_send_wr *wr)
1053*b83c6205SAbhijit Gangurde {
1054*b83c6205SAbhijit Gangurde 	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
1055*b83c6205SAbhijit Gangurde 	struct ionic_sq_meta *meta;
1056*b83c6205SAbhijit Gangurde 	struct ionic_v1_wqe *wqe;
1057*b83c6205SAbhijit Gangurde 
1058*b83c6205SAbhijit Gangurde 	if (wr->send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
1059*b83c6205SAbhijit Gangurde 		return -EINVAL;
1060*b83c6205SAbhijit Gangurde 
1061*b83c6205SAbhijit Gangurde 	meta = &qp->sq_meta[qp->sq.prod];
1062*b83c6205SAbhijit Gangurde 	wqe = ionic_queue_at_prod(&qp->sq);
1063*b83c6205SAbhijit Gangurde 
1064*b83c6205SAbhijit Gangurde 	ionic_prep_sq_wqe(qp, wqe);
1065*b83c6205SAbhijit Gangurde 
1066*b83c6205SAbhijit Gangurde 	wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, LOCAL_INV);
1067*b83c6205SAbhijit Gangurde 	wqe->base.imm_data_key = cpu_to_be32(wr->ex.invalidate_rkey);
1068*b83c6205SAbhijit Gangurde 
1069*b83c6205SAbhijit Gangurde 	meta->len = 0;
1070*b83c6205SAbhijit Gangurde 	meta->ibop = IB_WC_LOCAL_INV;
1071*b83c6205SAbhijit Gangurde 
1072*b83c6205SAbhijit Gangurde 	ionic_prep_base(qp, wr, meta, wqe);
1073*b83c6205SAbhijit Gangurde 
1074*b83c6205SAbhijit Gangurde 	return 0;
1075*b83c6205SAbhijit Gangurde }
1076*b83c6205SAbhijit Gangurde 
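/*
 * Prepare a fast-registration (REG_MR) work request. The MR's page table
 * must already have been populated by ib_map_mr_sg(), otherwise
 * mr->buf.tbl_pages is zero and the post is rejected.
 */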
1077*b83c6205SAbhijit Gangurde static int ionic_prep_reg(struct ionic_qp *qp,
1078*b83c6205SAbhijit Gangurde 			  const struct ib_reg_wr *wr)
1079*b83c6205SAbhijit Gangurde {
1080*b83c6205SAbhijit Gangurde 	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
1081*b83c6205SAbhijit Gangurde 	struct ionic_mr *mr = to_ionic_mr(wr->mr);
1082*b83c6205SAbhijit Gangurde 	struct ionic_sq_meta *meta;
1083*b83c6205SAbhijit Gangurde 	struct ionic_v1_wqe *wqe;
1084*b83c6205SAbhijit Gangurde 	__le64 dma_addr;
1085*b83c6205SAbhijit Gangurde 	int flags;
1086*b83c6205SAbhijit Gangurde 
1087*b83c6205SAbhijit Gangurde 	if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
1088*b83c6205SAbhijit Gangurde 		return -EINVAL;
1089*b83c6205SAbhijit Gangurde 
1090*b83c6205SAbhijit Gangurde 	/* must call ib_map_mr_sg before posting reg wr */
1091*b83c6205SAbhijit Gangurde 	if (!mr->buf.tbl_pages)
1092*b83c6205SAbhijit Gangurde 		return -EINVAL;
1093*b83c6205SAbhijit Gangurde 
1094*b83c6205SAbhijit Gangurde 	meta = &qp->sq_meta[qp->sq.prod];
1095*b83c6205SAbhijit Gangurde 	wqe = ionic_queue_at_prod(&qp->sq);
1096*b83c6205SAbhijit Gangurde 
1097*b83c6205SAbhijit Gangurde 	ionic_prep_sq_wqe(qp, wqe);
1098*b83c6205SAbhijit Gangurde 
1099*b83c6205SAbhijit Gangurde 	flags = to_ionic_mr_flags(wr->access);
1100*b83c6205SAbhijit Gangurde 
1101*b83c6205SAbhijit Gangurde 	wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, REG_MR);
1102*b83c6205SAbhijit Gangurde 	wqe->base.num_sge_key = wr->key;
1103*b83c6205SAbhijit Gangurde 	wqe->base.imm_data_key = cpu_to_be32(mr->ibmr.lkey);
1104*b83c6205SAbhijit Gangurde 	wqe->reg_mr.va = cpu_to_be64(mr->ibmr.iova);
1105*b83c6205SAbhijit Gangurde 	wqe->reg_mr.length = cpu_to_be64(mr->ibmr.length);
1106*b83c6205SAbhijit Gangurde 	wqe->reg_mr.offset = ionic_pgtbl_off(&mr->buf, mr->ibmr.iova);
1107*b83c6205SAbhijit Gangurde 	dma_addr = ionic_pgtbl_dma(&mr->buf, mr->ibmr.iova);
1108*b83c6205SAbhijit Gangurde 	wqe->reg_mr.dma_addr = cpu_to_be64(le64_to_cpu(dma_addr));
1109*b83c6205SAbhijit Gangurde 
1110*b83c6205SAbhijit Gangurde 	wqe->reg_mr.map_count = cpu_to_be32(mr->buf.tbl_pages);
1111*b83c6205SAbhijit Gangurde 	wqe->reg_mr.flags = cpu_to_be16(flags);
1112*b83c6205SAbhijit Gangurde 	wqe->reg_mr.dir_size_log2 = 0;
1113*b83c6205SAbhijit Gangurde 	wqe->reg_mr.page_size_log2 = order_base_2(mr->ibmr.page_size);
1114*b83c6205SAbhijit Gangurde 
1115*b83c6205SAbhijit Gangurde 	meta->len = 0;
1116*b83c6205SAbhijit Gangurde 	meta->ibop = IB_WC_REG_MR;
1117*b83c6205SAbhijit Gangurde 
1118*b83c6205SAbhijit Gangurde 	ionic_prep_base(qp, &wr->wr, meta, wqe);
1119*b83c6205SAbhijit Gangurde 
1120*b83c6205SAbhijit Gangurde 	return 0;
1121*b83c6205SAbhijit Gangurde }
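
/*
 * A minimal sketch of the fast-registration flow a ULP might use to reach
 * ionic_prep_reg() above; the mr, qp, sgl and nents names are
 * illustrative, not part of this driver:
 *
 *	int n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *	if (n != nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	struct ib_reg_wr reg = {
 *		.wr.opcode	= IB_WR_REG_MR,
 *		.mr		= mr,
 *		.key		= mr->rkey,
 *		.access		= IB_ACCESS_LOCAL_WRITE |
 *				  IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int rc = ib_post_send(qp, &reg.wr, &bad);
 */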
1122*b83c6205SAbhijit Gangurde 
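/*
 * Build one WQE for a send work request on an RC queue pair, dispatching
 * on the verbs opcode; unsupported opcodes are rejected with -EINVAL.
 */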
1123*b83c6205SAbhijit Gangurde static int ionic_prep_one_rc(struct ionic_qp *qp,
1124*b83c6205SAbhijit Gangurde 			     const struct ib_send_wr *wr)
1125*b83c6205SAbhijit Gangurde {
1126*b83c6205SAbhijit Gangurde 	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
1127*b83c6205SAbhijit Gangurde 	int rc = 0;
1128*b83c6205SAbhijit Gangurde 
1129*b83c6205SAbhijit Gangurde 	switch (wr->opcode) {
1130*b83c6205SAbhijit Gangurde 	case IB_WR_SEND:
1131*b83c6205SAbhijit Gangurde 	case IB_WR_SEND_WITH_IMM:
1132*b83c6205SAbhijit Gangurde 	case IB_WR_SEND_WITH_INV:
1133*b83c6205SAbhijit Gangurde 		rc = ionic_prep_send(qp, wr);
1134*b83c6205SAbhijit Gangurde 		break;
1135*b83c6205SAbhijit Gangurde 	case IB_WR_RDMA_READ:
1136*b83c6205SAbhijit Gangurde 	case IB_WR_RDMA_WRITE:
1137*b83c6205SAbhijit Gangurde 	case IB_WR_RDMA_WRITE_WITH_IMM:
1138*b83c6205SAbhijit Gangurde 		rc = ionic_prep_rdma(qp, rdma_wr(wr));
1139*b83c6205SAbhijit Gangurde 		break;
1140*b83c6205SAbhijit Gangurde 	case IB_WR_ATOMIC_CMP_AND_SWP:
1141*b83c6205SAbhijit Gangurde 	case IB_WR_ATOMIC_FETCH_AND_ADD:
1142*b83c6205SAbhijit Gangurde 		rc = ionic_prep_atomic(qp, atomic_wr(wr));
1143*b83c6205SAbhijit Gangurde 		break;
1144*b83c6205SAbhijit Gangurde 	case IB_WR_LOCAL_INV:
1145*b83c6205SAbhijit Gangurde 		rc = ionic_prep_inv(qp, wr);
1146*b83c6205SAbhijit Gangurde 		break;
1147*b83c6205SAbhijit Gangurde 	case IB_WR_REG_MR:
1148*b83c6205SAbhijit Gangurde 		rc = ionic_prep_reg(qp, reg_wr(wr));
1149*b83c6205SAbhijit Gangurde 		break;
1150*b83c6205SAbhijit Gangurde 	default:
1151*b83c6205SAbhijit Gangurde 		ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
1152*b83c6205SAbhijit Gangurde 		rc = -EINVAL;
1153*b83c6205SAbhijit Gangurde 	}
1154*b83c6205SAbhijit Gangurde 
1155*b83c6205SAbhijit Gangurde 	return rc;
1156*b83c6205SAbhijit Gangurde }
1157*b83c6205SAbhijit Gangurde 
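/*
 * UD and GSI queue pairs support only send and send-with-immediate;
 * all other opcodes are rejected with -EINVAL.
 */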
1158*b83c6205SAbhijit Gangurde static int ionic_prep_one_ud(struct ionic_qp *qp,
1159*b83c6205SAbhijit Gangurde 			     const struct ib_send_wr *wr)
1160*b83c6205SAbhijit Gangurde {
1161*b83c6205SAbhijit Gangurde 	struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
1162*b83c6205SAbhijit Gangurde 	int rc = 0;
1163*b83c6205SAbhijit Gangurde 
1164*b83c6205SAbhijit Gangurde 	switch (wr->opcode) {
1165*b83c6205SAbhijit Gangurde 	case IB_WR_SEND:
1166*b83c6205SAbhijit Gangurde 	case IB_WR_SEND_WITH_IMM:
1167*b83c6205SAbhijit Gangurde 		rc = ionic_prep_send_ud(qp, ud_wr(wr));
1168*b83c6205SAbhijit Gangurde 		break;
1169*b83c6205SAbhijit Gangurde 	default:
1170*b83c6205SAbhijit Gangurde 		ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
1171*b83c6205SAbhijit Gangurde 		rc = -EINVAL;
1172*b83c6205SAbhijit Gangurde 	}
1173*b83c6205SAbhijit Gangurde 
1174*b83c6205SAbhijit Gangurde 	return rc;
1175*b83c6205SAbhijit Gangurde }
1176*b83c6205SAbhijit Gangurde 
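/*
 * Build one receive WQE at the RQ producer slot. While the slot's fence
 * bit is set it is still owned by the device, so the caller gets -EAGAIN
 * and may retry. A free meta entry records the wr_id, and its index is
 * written as wqe_id so the completion path can recover it later.
 */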
1177*b83c6205SAbhijit Gangurde static int ionic_prep_recv(struct ionic_qp *qp,
1178*b83c6205SAbhijit Gangurde 			   const struct ib_recv_wr *wr)
1179*b83c6205SAbhijit Gangurde {
1180*b83c6205SAbhijit Gangurde 	struct ionic_rq_meta *meta;
1181*b83c6205SAbhijit Gangurde 	struct ionic_v1_wqe *wqe;
1182*b83c6205SAbhijit Gangurde 	s64 signed_len;
1183*b83c6205SAbhijit Gangurde 	u32 mval;
1184*b83c6205SAbhijit Gangurde 
1185*b83c6205SAbhijit Gangurde 	wqe = ionic_queue_at_prod(&qp->rq);
1186*b83c6205SAbhijit Gangurde 
1187*b83c6205SAbhijit Gangurde 	/* if wqe is owned by device, caller can try posting again soon */
1188*b83c6205SAbhijit Gangurde 	if (wqe->base.flags & cpu_to_be16(IONIC_V1_FLAG_FENCE))
1189*b83c6205SAbhijit Gangurde 		return -EAGAIN;
1190*b83c6205SAbhijit Gangurde 
1191*b83c6205SAbhijit Gangurde 	meta = qp->rq_meta_head;
1192*b83c6205SAbhijit Gangurde 	if (unlikely(meta == IONIC_META_LAST) ||
1193*b83c6205SAbhijit Gangurde 	    unlikely(meta == IONIC_META_POSTED))
1194*b83c6205SAbhijit Gangurde 		return -EIO;
1195*b83c6205SAbhijit Gangurde 
1196*b83c6205SAbhijit Gangurde 	ionic_prep_rq_wqe(qp, wqe);
1197*b83c6205SAbhijit Gangurde 
1198*b83c6205SAbhijit Gangurde 	mval = ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, qp->rq_spec,
1199*b83c6205SAbhijit Gangurde 					 false);
1200*b83c6205SAbhijit Gangurde 	signed_len = ionic_prep_pld(wqe, &wqe->recv.pld,
1201*b83c6205SAbhijit Gangurde 				    qp->rq_spec, mval,
1202*b83c6205SAbhijit Gangurde 				    wr->sg_list, wr->num_sge);
1203*b83c6205SAbhijit Gangurde 	if (signed_len < 0)
1204*b83c6205SAbhijit Gangurde 		return signed_len;
1205*b83c6205SAbhijit Gangurde 
1206*b83c6205SAbhijit Gangurde 	meta->wrid = wr->wr_id;
1207*b83c6205SAbhijit Gangurde 
1208*b83c6205SAbhijit Gangurde 	wqe->base.wqe_id = meta - qp->rq_meta;
1209*b83c6205SAbhijit Gangurde 	wqe->base.num_sge_key = wr->num_sge;
1210*b83c6205SAbhijit Gangurde 
1211*b83c6205SAbhijit Gangurde 	/* total length for recv goes in base imm_data_key */
1212*b83c6205SAbhijit Gangurde 	wqe->base.imm_data_key = cpu_to_be32(signed_len);
1213*b83c6205SAbhijit Gangurde 
1214*b83c6205SAbhijit Gangurde 	ionic_queue_produce(&qp->rq);
1215*b83c6205SAbhijit Gangurde 
1216*b83c6205SAbhijit Gangurde 	qp->rq_meta_head = meta->next;
1217*b83c6205SAbhijit Gangurde 	meta->next = IONIC_META_POSTED;
1218*b83c6205SAbhijit Gangurde 
1219*b83c6205SAbhijit Gangurde 	return 0;
1220*b83c6205SAbhijit Gangurde }
1221*b83c6205SAbhijit Gangurde 
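/*
 * Post a chain of send work requests. WQEs are built under sq_lock; then,
 * under cq->lock, CQ capacity is reserved for the new work and the SQ
 * doorbell is rung. If the QP is flushing, the CQ is marked and the
 * completion handler is invoked. On error, *bad points at the first work
 * request that was not posted.
 */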
1222*b83c6205SAbhijit Gangurde static int ionic_post_send_common(struct ionic_ibdev *dev,
1223*b83c6205SAbhijit Gangurde 				  struct ionic_vcq *vcq,
1224*b83c6205SAbhijit Gangurde 				  struct ionic_cq *cq,
1225*b83c6205SAbhijit Gangurde 				  struct ionic_qp *qp,
1226*b83c6205SAbhijit Gangurde 				  const struct ib_send_wr *wr,
1227*b83c6205SAbhijit Gangurde 				  const struct ib_send_wr **bad)
1228*b83c6205SAbhijit Gangurde {
1229*b83c6205SAbhijit Gangurde 	unsigned long irqflags;
1230*b83c6205SAbhijit Gangurde 	bool notify = false;
1231*b83c6205SAbhijit Gangurde 	int spend, rc = 0;
1232*b83c6205SAbhijit Gangurde 
1233*b83c6205SAbhijit Gangurde 	if (!bad)
1234*b83c6205SAbhijit Gangurde 		return -EINVAL;
1235*b83c6205SAbhijit Gangurde 
1236*b83c6205SAbhijit Gangurde 	if (!qp->has_sq) {
1237*b83c6205SAbhijit Gangurde 		*bad = wr;
1238*b83c6205SAbhijit Gangurde 		return -EINVAL;
1239*b83c6205SAbhijit Gangurde 	}
1240*b83c6205SAbhijit Gangurde 
1241*b83c6205SAbhijit Gangurde 	if (qp->state < IB_QPS_RTS) {
1242*b83c6205SAbhijit Gangurde 		*bad = wr;
1243*b83c6205SAbhijit Gangurde 		return -EINVAL;
1244*b83c6205SAbhijit Gangurde 	}
1245*b83c6205SAbhijit Gangurde 
1246*b83c6205SAbhijit Gangurde 	spin_lock_irqsave(&qp->sq_lock, irqflags);
1247*b83c6205SAbhijit Gangurde 
1248*b83c6205SAbhijit Gangurde 	while (wr) {
1249*b83c6205SAbhijit Gangurde 		if (ionic_queue_full(&qp->sq)) {
1250*b83c6205SAbhijit Gangurde 			ibdev_dbg(&dev->ibdev, "queue full\n");
1251*b83c6205SAbhijit Gangurde 			rc = -ENOMEM;
1252*b83c6205SAbhijit Gangurde 			goto out;
1253*b83c6205SAbhijit Gangurde 		}
1254*b83c6205SAbhijit Gangurde 
1255*b83c6205SAbhijit Gangurde 		if (qp->ibqp.qp_type == IB_QPT_UD ||
1256*b83c6205SAbhijit Gangurde 		    qp->ibqp.qp_type == IB_QPT_GSI)
1257*b83c6205SAbhijit Gangurde 			rc = ionic_prep_one_ud(qp, wr);
1258*b83c6205SAbhijit Gangurde 		else
1259*b83c6205SAbhijit Gangurde 			rc = ionic_prep_one_rc(qp, wr);
1260*b83c6205SAbhijit Gangurde 		if (rc)
1261*b83c6205SAbhijit Gangurde 			goto out;
1262*b83c6205SAbhijit Gangurde 
1263*b83c6205SAbhijit Gangurde 		wr = wr->next;
1264*b83c6205SAbhijit Gangurde 	}
1265*b83c6205SAbhijit Gangurde 
1266*b83c6205SAbhijit Gangurde out:
1267*b83c6205SAbhijit Gangurde 	spin_unlock_irqrestore(&qp->sq_lock, irqflags);
1268*b83c6205SAbhijit Gangurde 
1269*b83c6205SAbhijit Gangurde 	spin_lock_irqsave(&cq->lock, irqflags);
1270*b83c6205SAbhijit Gangurde 	spin_lock(&qp->sq_lock);
1271*b83c6205SAbhijit Gangurde 
1272*b83c6205SAbhijit Gangurde 	if (likely(qp->sq.prod != qp->sq_old_prod)) {
1273*b83c6205SAbhijit Gangurde 		/* ring cq doorbell just in time */
1274*b83c6205SAbhijit Gangurde 		spend = (qp->sq.prod - qp->sq_old_prod) & qp->sq.mask;
1275*b83c6205SAbhijit Gangurde 		ionic_reserve_cq(dev, cq, spend);
1276*b83c6205SAbhijit Gangurde 
1277*b83c6205SAbhijit Gangurde 		qp->sq_old_prod = qp->sq.prod;
1278*b83c6205SAbhijit Gangurde 
1279*b83c6205SAbhijit Gangurde 		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.sq_qtype,
1280*b83c6205SAbhijit Gangurde 				 ionic_queue_dbell_val(&qp->sq));
1281*b83c6205SAbhijit Gangurde 	}
1282*b83c6205SAbhijit Gangurde 
1283*b83c6205SAbhijit Gangurde 	if (qp->sq_flush) {
1284*b83c6205SAbhijit Gangurde 		notify = true;
1285*b83c6205SAbhijit Gangurde 		cq->flush = true;
1286*b83c6205SAbhijit Gangurde 		list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
1287*b83c6205SAbhijit Gangurde 	}
1288*b83c6205SAbhijit Gangurde 
1289*b83c6205SAbhijit Gangurde 	spin_unlock(&qp->sq_lock);
1290*b83c6205SAbhijit Gangurde 	spin_unlock_irqrestore(&cq->lock, irqflags);
1291*b83c6205SAbhijit Gangurde 
1292*b83c6205SAbhijit Gangurde 	if (notify && vcq->ibcq.comp_handler)
1293*b83c6205SAbhijit Gangurde 		vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
1294*b83c6205SAbhijit Gangurde 
1295*b83c6205SAbhijit Gangurde 	*bad = wr;
1296*b83c6205SAbhijit Gangurde 	return rc;
1297*b83c6205SAbhijit Gangurde }
1298*b83c6205SAbhijit Gangurde 
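/*
 * Post a chain of receive work requests. This mirrors the send path:
 * WQEs are built under rq_lock, then CQ capacity is reserved and the RQ
 * doorbell is rung under cq->lock. A NULL cq skips the doorbell and
 * flush handling entirely.
 */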
1299*b83c6205SAbhijit Gangurde static int ionic_post_recv_common(struct ionic_ibdev *dev,
1300*b83c6205SAbhijit Gangurde 				  struct ionic_vcq *vcq,
1301*b83c6205SAbhijit Gangurde 				  struct ionic_cq *cq,
1302*b83c6205SAbhijit Gangurde 				  struct ionic_qp *qp,
1303*b83c6205SAbhijit Gangurde 				  const struct ib_recv_wr *wr,
1304*b83c6205SAbhijit Gangurde 				  const struct ib_recv_wr **bad)
1305*b83c6205SAbhijit Gangurde {
1306*b83c6205SAbhijit Gangurde 	unsigned long irqflags;
1307*b83c6205SAbhijit Gangurde 	bool notify = false;
1308*b83c6205SAbhijit Gangurde 	int spend, rc = 0;
1309*b83c6205SAbhijit Gangurde 
1310*b83c6205SAbhijit Gangurde 	if (!bad)
1311*b83c6205SAbhijit Gangurde 		return -EINVAL;
1312*b83c6205SAbhijit Gangurde 
1313*b83c6205SAbhijit Gangurde 	if (!qp->has_rq) {
1314*b83c6205SAbhijit Gangurde 		*bad = wr;
1315*b83c6205SAbhijit Gangurde 		return -EINVAL;
1316*b83c6205SAbhijit Gangurde 	}
1317*b83c6205SAbhijit Gangurde 
1318*b83c6205SAbhijit Gangurde 	if (qp->state < IB_QPS_INIT) {
1319*b83c6205SAbhijit Gangurde 		*bad = wr;
1320*b83c6205SAbhijit Gangurde 		return -EINVAL;
1321*b83c6205SAbhijit Gangurde 	}
1322*b83c6205SAbhijit Gangurde 
1323*b83c6205SAbhijit Gangurde 	spin_lock_irqsave(&qp->rq_lock, irqflags);
1324*b83c6205SAbhijit Gangurde 
1325*b83c6205SAbhijit Gangurde 	while (wr) {
1326*b83c6205SAbhijit Gangurde 		if (ionic_queue_full(&qp->rq)) {
1327*b83c6205SAbhijit Gangurde 			ibdev_dbg(&dev->ibdev, "queue full\n");
1328*b83c6205SAbhijit Gangurde 			rc = -ENOMEM;
1329*b83c6205SAbhijit Gangurde 			goto out;
1330*b83c6205SAbhijit Gangurde 		}
1331*b83c6205SAbhijit Gangurde 
1332*b83c6205SAbhijit Gangurde 		rc = ionic_prep_recv(qp, wr);
1333*b83c6205SAbhijit Gangurde 		if (rc)
1334*b83c6205SAbhijit Gangurde 			goto out;
1335*b83c6205SAbhijit Gangurde 
1336*b83c6205SAbhijit Gangurde 		wr = wr->next;
1337*b83c6205SAbhijit Gangurde 	}
1338*b83c6205SAbhijit Gangurde 
1339*b83c6205SAbhijit Gangurde out:
1340*b83c6205SAbhijit Gangurde 	if (!cq) {
1341*b83c6205SAbhijit Gangurde 		spin_unlock_irqrestore(&qp->rq_lock, irqflags);
1342*b83c6205SAbhijit Gangurde 		goto out_unlocked;
1343*b83c6205SAbhijit Gangurde 	}
1344*b83c6205SAbhijit Gangurde 	spin_unlock_irqrestore(&qp->rq_lock, irqflags);
1345*b83c6205SAbhijit Gangurde 
1346*b83c6205SAbhijit Gangurde 	spin_lock_irqsave(&cq->lock, irqflags);
1347*b83c6205SAbhijit Gangurde 	spin_lock(&qp->rq_lock);
1348*b83c6205SAbhijit Gangurde 
1349*b83c6205SAbhijit Gangurde 	if (likely(qp->rq.prod != qp->rq_old_prod)) {
1350*b83c6205SAbhijit Gangurde 		/* ring cq doorbell just in time */
1351*b83c6205SAbhijit Gangurde 		spend = (qp->rq.prod - qp->rq_old_prod) & qp->rq.mask;
1352*b83c6205SAbhijit Gangurde 		ionic_reserve_cq(dev, cq, spend);
1353*b83c6205SAbhijit Gangurde 
1354*b83c6205SAbhijit Gangurde 		qp->rq_old_prod = qp->rq.prod;
1355*b83c6205SAbhijit Gangurde 
1356*b83c6205SAbhijit Gangurde 		ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.rq_qtype,
1357*b83c6205SAbhijit Gangurde 				 ionic_queue_dbell_val(&qp->rq));
1358*b83c6205SAbhijit Gangurde 	}
1359*b83c6205SAbhijit Gangurde 
1360*b83c6205SAbhijit Gangurde 	if (qp->rq_flush) {
1361*b83c6205SAbhijit Gangurde 		notify = true;
1362*b83c6205SAbhijit Gangurde 		cq->flush = true;
1363*b83c6205SAbhijit Gangurde 		list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
1364*b83c6205SAbhijit Gangurde 	}
1365*b83c6205SAbhijit Gangurde 
1366*b83c6205SAbhijit Gangurde 	spin_unlock(&qp->rq_lock);
1367*b83c6205SAbhijit Gangurde 	spin_unlock_irqrestore(&cq->lock, irqflags);
1368*b83c6205SAbhijit Gangurde 
1369*b83c6205SAbhijit Gangurde 	if (notify && vcq->ibcq.comp_handler)
1370*b83c6205SAbhijit Gangurde 		vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
1371*b83c6205SAbhijit Gangurde 
1372*b83c6205SAbhijit Gangurde out_unlocked:
1373*b83c6205SAbhijit Gangurde 	*bad = wr;
1374*b83c6205SAbhijit Gangurde 	return rc;
1375*b83c6205SAbhijit Gangurde }
1376*b83c6205SAbhijit Gangurde 
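/*
 * Verbs entry point for posting sends: resolve the CQ for this QP's UDMA
 * index within the vcq and hand off to the common post routine.
 */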
1377*b83c6205SAbhijit Gangurde int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1378*b83c6205SAbhijit Gangurde 		    const struct ib_send_wr **bad)
1379*b83c6205SAbhijit Gangurde {
1380*b83c6205SAbhijit Gangurde 	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
1381*b83c6205SAbhijit Gangurde 	struct ionic_vcq *vcq = to_ionic_vcq(ibqp->send_cq);
1382*b83c6205SAbhijit Gangurde 	struct ionic_qp *qp = to_ionic_qp(ibqp);
1383*b83c6205SAbhijit Gangurde 	struct ionic_cq *cq =
1384*b83c6205SAbhijit Gangurde 		to_ionic_vcq_cq(ibqp->send_cq, qp->udma_idx);
1385*b83c6205SAbhijit Gangurde 
1386*b83c6205SAbhijit Gangurde 	return ionic_post_send_common(dev, vcq, cq, qp, wr, bad);
1387*b83c6205SAbhijit Gangurde }
1388*b83c6205SAbhijit Gangurde 
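/* Verbs entry point for posting receives; see ionic_post_send() above. */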
1389*b83c6205SAbhijit Gangurde int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1390*b83c6205SAbhijit Gangurde 		    const struct ib_recv_wr **bad)
1391*b83c6205SAbhijit Gangurde {
1392*b83c6205SAbhijit Gangurde 	struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
1393*b83c6205SAbhijit Gangurde 	struct ionic_vcq *vcq = to_ionic_vcq(ibqp->recv_cq);
1394*b83c6205SAbhijit Gangurde 	struct ionic_qp *qp = to_ionic_qp(ibqp);
1395*b83c6205SAbhijit Gangurde 	struct ionic_cq *cq =
1396*b83c6205SAbhijit Gangurde 		to_ionic_vcq_cq(ibqp->recv_cq, qp->udma_idx);
1397*b83c6205SAbhijit Gangurde 
1398*b83c6205SAbhijit Gangurde 	return ionic_post_recv_common(dev, vcq, cq, qp, wr, bad);
1399*b83c6205SAbhijit Gangurde }