// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

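/* mana_ib_create_cq() - create a completion queue.
 *
 * CQs created from user space (udata != NULL) are backed by a buffer the
 * caller passes in ucmd.buf_addr; kernel CQs get a page-aligned,
 * power-of-two sized kernel-mode GDMA queue instead. RNIC CQs are
 * additionally created on the adapter and registered in the GDMA CQ
 * table so that completion events reach mana_ib_cq_handler().
 */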
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs)
{
	struct ib_udata *udata = &attrs->driver_udata;
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct mana_ib_create_cq_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_create_cq ucmd = {};
	struct mana_ib_dev *mdev;
	bool is_rnic_cq;
	u32 doorbell;
	u32 buf_size;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
	cq->cq_handle = INVALID_MANA_HANDLE;

	if (udata) {
		if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
		if (err) {
			ibdev_dbg(ibdev, "Failed to copy from udata for create cq, %d\n", err);
			return err;
		}

		is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);

		if ((!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) ||
		    attr->cqe > U32_MAX / COMP_ENTRY_SIZE) {
			ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
			return -EINVAL;
		}

		cq->cqe = attr->cqe;
		err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
					   &cq->queue);
		if (err) {
			ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
			return err;
		}

		mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
							  ibucontext);
		doorbell = mana_ucontext->doorbell;
	} else {
		is_rnic_cq = true;
		buf_size = MANA_PAGE_ALIGN(roundup_pow_of_two(attr->cqe * COMP_ENTRY_SIZE));
		cq->cqe = buf_size / COMP_ENTRY_SIZE;
		err = mana_ib_create_kernel_queue(mdev, buf_size, GDMA_CQ, &cq->queue);
		if (err) {
			ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
			return err;
		}
		doorbell = mdev->gdma_dev->doorbell;
	}

	if (is_rnic_cq) {
		err = mana_ib_gd_create_cq(mdev, cq, doorbell);
		if (err) {
			ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
			goto err_destroy_queue;
		}

		err = mana_ib_install_cq_cb(mdev, cq);
		if (err) {
			ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
			goto err_destroy_rnic_cq;
		}
	}

	if (udata) {
		resp.cqid = cq->queue.id;
		err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
			goto err_remove_cq_cb;
		}
	}

	spin_lock_init(&cq->cq_lock);
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	return 0;

err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, cq);
err_destroy_rnic_cq:
	mana_ib_gd_destroy_cq(mdev, cq);
err_destroy_queue:
	mana_ib_destroy_queue(mdev, &cq->queue);

	return err;
}

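/* mana_ib_destroy_cq() - tear down a completion queue.
 *
 * Undoes mana_ib_create_cq(): drops the GDMA CQ table entry, destroys
 * the adapter-side CQ (if one was created) and releases the queue memory.
 */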
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_dev *mdev;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	mana_ib_remove_cq_cb(mdev, cq);

	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_cq(mdev, cq);

	mana_ib_destroy_queue(mdev, &cq->queue);

	return 0;
}

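/* Completion callback installed in the GDMA CQ table; it forwards the
 * event to the consumer's ib_cq completion handler, if any.
 */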
static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
	struct mana_ib_cq *cq = ctx;

	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

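/* mana_ib_install_cq_cb() - register the CQ in the GDMA CQ table.
 *
 * Kernel CQs reuse their kernel-mode queue as the table entry, while user
 * CQs get a dedicated gdma_queue allocated here. Either way, the entry
 * routes completion events to mana_ib_cq_handler().
 */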
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue *gdma_cq;

	if (cq->queue.id >= gc->max_num_cqs)
		return -EINVAL;
	/* Create CQ table entry */
	WARN_ON(gc->cq_table[cq->queue.id]);
	if (cq->queue.kmem)
		gdma_cq = cq->queue.kmem;
	else
		gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
	if (!gdma_cq)
		return -ENOMEM;

	gdma_cq->cq.context = cq;
	gdma_cq->type = GDMA_CQ;
	gdma_cq->cq.callback = mana_ib_cq_handler;
	gdma_cq->id = cq->queue.id;
	gc->cq_table[cq->queue.id] = gdma_cq;
	return 0;
}

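/* mana_ib_remove_cq_cb() - drop the CQ's entry from the GDMA CQ table,
 * freeing it only if it was allocated by mana_ib_install_cq_cb().
 */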
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);

	if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
		return;

	/* Kernel-mode queues are cleaned up and removed from the table
	 * by the mana core driver when the queue itself is destroyed.
	 */
	if (cq->queue.kmem)
		return;

	kfree(gc->cq_table[cq->queue.id]);
	gc->cq_table[cq->queue.id] = NULL;
}

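/* mana_ib_arm_cq() - request a completion notification by ringing the CQ
 * doorbell with the arm bit set.
 */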
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct gdma_queue *gdma_cq = cq->queue.kmem;

	if (!gdma_cq)
		return -EINVAL;

	mana_gd_ring_cq(gdma_cq, SET_ARM_BIT);
	return 0;
}

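/* Complete the oldest outstanding send WQE of a UD/GSI QP: record the
 * vendor error code in its shadow entry and advance the send work queue
 * tail past the posted WQE.
 */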
static inline void handle_ud_sq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
{
	struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
	struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
	struct ud_sq_shadow_wqe *shadow_wqe;

	shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq);
	if (!shadow_wqe)
		return;

	shadow_wqe->header.error_code = rdma_cqe->ud_send.vendor_error;

	wq->tail += shadow_wqe->header.posted_wqe_size;
	shadow_queue_advance_next_to_complete(&qp->shadow_sq);
}

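/* Complete the oldest outstanding receive WQE of a UD/GSI QP: capture the
 * message length and source QPN in its shadow entry and advance the
 * receive work queue tail past the posted WQE.
 */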
static inline void handle_ud_rq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
{
	struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
	struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
	struct ud_rq_shadow_wqe *shadow_wqe;

	shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_rq);
	if (!shadow_wqe)
		return;

	shadow_wqe->byte_len = rdma_cqe->ud_recv.msg_len;
	shadow_wqe->src_qpn = rdma_cqe->ud_recv.src_qpn;
	shadow_wqe->header.error_code = IB_WC_SUCCESS;

	wq->tail += shadow_wqe->header.posted_wqe_size;
	shadow_queue_advance_next_to_complete(&qp->shadow_rq);
}

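/* Dispatch one polled GDMA completion to the QP that owns the work queue.
 * Only UD and GSI QPs are handled here; the QP reference taken for the
 * lookup is dropped before returning.
 */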
static void mana_handle_cqe(struct mana_ib_dev *mdev, struct gdma_comp *cqe)
{
	struct mana_ib_qp *qp = mana_get_qp_ref(mdev, cqe->wq_num, cqe->is_sq);

	if (!qp)
		return;

	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) {
		if (cqe->is_sq)
			handle_ud_sq_cqe(qp, cqe);
		else
			handle_ud_rq_cqe(qp, cqe);
	}

	mana_put_qp_ref(qp);
}

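/* Translate a completed shadow WQE into an ib_wc work completion; receive
 * completions additionally carry the byte count, source QPN and GRH flag.
 */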
static void fill_verbs_from_shadow_wqe(struct mana_ib_qp *qp, struct ib_wc *wc,
				       const struct shadow_wqe_header *shadow_wqe)
{
	const struct ud_rq_shadow_wqe *ud_wqe = (const struct ud_rq_shadow_wqe *)shadow_wqe;

	wc->wr_id = shadow_wqe->wr_id;
	wc->status = shadow_wqe->error_code;
	wc->opcode = shadow_wqe->opcode;
	wc->vendor_err = shadow_wqe->error_code;
	wc->wc_flags = 0;
	wc->qp = &qp->ibqp;
	wc->pkey_index = 0;

	if (shadow_wqe->opcode == IB_WC_RECV) {
		wc->byte_len = ud_wqe->byte_len;
		wc->src_qp = ud_wqe->src_qpn;
		wc->wc_flags |= IB_WC_GRH;
	}
}

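/* mana_process_completions() - harvest up to @nwc completed shadow WQEs.
 *
 * Walks the send queues, then the receive queues, of every QP attached to
 * the CQ, filling @wc entries until either limit is reached. Returns the
 * number of work completions written.
 */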
static int mana_process_completions(struct mana_ib_cq *cq, int nwc, struct ib_wc *wc)
{
	struct shadow_wqe_header *shadow_wqe;
	struct mana_ib_qp *qp;
	int wc_index = 0;

	/* process send shadow queue completions */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_sq))
				!= NULL) {
			if (wc_index >= nwc)
				goto out;

			fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
			shadow_queue_advance_consumer(&qp->shadow_sq);
			wc_index++;
		}
	}

	/* process recv shadow queue completions */
	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_rq))
				!= NULL) {
			if (wc_index >= nwc)
				goto out;

			fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
			shadow_queue_advance_consumer(&qp->shadow_rq);
			wc_index++;
		}
	}

out:
	return wc_index;
}

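/* mana_ib_poll_cq() - poll the CQ and return work completions.
 *
 * Drains up to @num_entries GDMA CQEs into the per-QP shadow queues under
 * the CQ lock, then converts completed shadow WQEs into @wc entries.
 */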
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct mana_ib_dev *mdev = container_of(ibcq->device, struct mana_ib_dev, ib_dev);
	struct gdma_queue *queue = cq->queue.kmem;
	struct gdma_comp gdma_cqe;
	unsigned long flags;
	int num_polled = 0;
	int comp_read, i;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		comp_read = mana_gd_poll_cq(queue, &gdma_cqe, 1);
		if (comp_read < 1)
			break;
		mana_handle_cqe(mdev, &gdma_cqe);
	}

	num_polled = mana_process_completions(cq, num_entries, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return num_polled;
}
317