// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

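/*
 * Program the hardware RX steering state for a vPort: the default RX
 * object, the RSS indirection table and the Toeplitz hash key. The
 * vPort is shared with the Ethernet driver via its mana_port_context.
 */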
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct gdma_context *gc;
	u32 req_buf_size;
	int i, err;

	gc = mdev_to_gc(dev);

	req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE);
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = gc->mana.dev_id;

	/* If there is more than one entry in the indirection table, enable RSS */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;

	/* The indirection table passed to the hardware must have
	 * MANA_INDIRECT_TABLE_DEF_SIZE entries. If the table supplied by the
	 * verb is smaller, replicate its entries to fill the hardware table.
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_DEF_SIZE; i++) {
		req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req->indir_tab[i]);
	}

	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}

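/*
 * Create a RAW_PACKET QP backed by an indirection table of WQs (RSS):
 * each WQ/CQ pair in the table becomes a hardware WQ object, and vPort
 * steering is then configured over the resulting RX objects.
 */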
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	unsigned int ind_tbl_size;
	struct net_device *ndev;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	struct mana_eq *eq;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_DEF_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports start at 1, MANA ports start at 0 */
	port = ucmd.port;
	ndev = mana_ib_get_netdev(pd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->queue.gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->queue.gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		eq = &mpc->ac->eqs[cq->comp_vector];
		cq_spec.attached_eq = eq->eq->id;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret) {
			/* Do cleanup starting with index i-1 */
			i--;
			goto fail;
		}

		/* The GDMA regions are now owned by the WQ object */
		wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

		wq->queue.id = wq_spec.queue_index;
		cq->queue.id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "rx_object 0x%llx wq id %llu cq id %llu\n",
			  wq->rx_object, wq->queue.id, cq->queue.id);

		resp.entries[i].cqid = cq->queue.id;
		resp.entries[i].wqid = wq->queue.id;

		mana_ind_table[i] = wq->rx_object;

		/* Create CQ table entry */
		ret = mana_ib_install_cq_cb(mdev, cq);
		if (ret)
			goto fail;
	}
	resp.num_entries = i;

	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(mana_ind_table);

	return 0;

fail:
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		ibcq = ibwq->cq;
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		mana_ib_remove_cq_cb(mdev, cq);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(mana_ind_table);

	return ret;
}

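/*
 * Create a RAW_PACKET send QP: configure the vPort for the requesting
 * ucontext, build the SQ from user memory, and bind it to the send CQ
 * through a hardware WQ object.
 */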
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_eq *eq;
	int eq_vec;
	u32 port;
	int err;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	port = ucmd.port;
	ndev = mana_ib_get_netdev(ibpd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create queue for create qp-raw, err %d\n", err);
		goto err_free_vport;
	}

	/* Create a WQ on the same port handle used by the Ethernet driver */
	wq_spec.gdma_region = qp->raw_sq.gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->queue.gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	eq_vec = send_cq->comp_vector;
	eq = &mpc->ac->eqs[eq_vec];
	cq_spec.attached_eq = eq->eq->id;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->qp_handle);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_queue;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	qp->raw_sq.id = wq_spec.queue_index;
	send_cq->queue.id = cq_spec.queue_index;

	/* Create CQ table entry */
	err = mana_ib_install_cq_cb(mdev, send_cq);
	if (err)
		goto err_destroy_wq_obj;

	ibdev_dbg(&mdev->ib_dev,
		  "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
		  qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);

	resp.sqid = qp->raw_sq.id;
	resp.cqid = send_cq->queue.id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata for create qp-raw, %d\n",
			  err);
		goto err_remove_cq_cb;
	}

	return 0;

err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, send_cq);

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

err_destroy_queue:
	mana_ib_destroy_queue(mdev, &qp->raw_sq);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port);

	return err;
}

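/* Size of a WQE with the given SGE count and inline OOB size, rounded
 * up to the GDMA basic unit.
 */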
static u32 mana_ib_wqe_size(u32 sge, u32 oob_size)
{
	u32 wqe_size = sge * sizeof(struct gdma_sge) + sizeof(struct gdma_wqe) + oob_size;

	return ALIGN(wqe_size, GDMA_WQE_BU_SIZE);
}

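/* Worst-case kernel UD/GSI queue size: max outstanding WRs times the
 * maximal WQE size, rounded up to a power of two and MANA-page aligned.
 * Returns 0 for QP types whose queues are not sized here.
 */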
static u32 mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)
{
	u32 queue_size;

	switch (attr->qp_type) {
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (queue_type == MANA_UD_SEND_QUEUE)
			queue_size = attr->cap.max_send_wr *
				mana_ib_wqe_size(attr->cap.max_send_sge, INLINE_OOB_LARGE_SIZE);
		else
			queue_size = attr->cap.max_recv_wr *
				mana_ib_wqe_size(attr->cap.max_recv_sge, INLINE_OOB_SMALL_SIZE);
		break;
	default:
		return 0;
	}

	return MANA_PAGE_ALIGN(roundup_pow_of_two(queue_size));
}

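/* Map a UD/GSI queue to its GDMA queue type (SQ or RQ). */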
static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32 queue_type)
{
	enum gdma_queue_type type;

	switch (attr->qp_type) {
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (queue_type == MANA_UD_SEND_QUEUE)
			type = GDMA_SQ;
		else
			type = GDMA_RQ;
		break;
	default:
		type = GDMA_INVALID_QUEUE;
	}
	return type;
}

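/*
 * QP lookup table helpers: QPs are kept in mdev->qp_table_wq, keyed by
 * queue id, so a QP can be found again from a completion or error.
 */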
static int mana_table_store_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
			     GFP_KERNEL);
}

static void mana_table_remove_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
}

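/*
 * A UD QP is stored under both of its queue ids; the send queue id is
 * tagged with MANA_SENDQ_MASK so the two keys cannot collide.
 */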
static int mana_table_store_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
	u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
	int err;

	err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL);
	if (err)
		return err;

	err = xa_insert_irq(&mdev->qp_table_wq, qidr, qp, GFP_KERNEL);
	if (err)
		goto remove_sq;

	return 0;

remove_sq:
	xa_erase_irq(&mdev->qp_table_wq, qids);
	return err;
}

static void mana_table_remove_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
	u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;

	xa_erase_irq(&mdev->qp_table_wq, qids);
	xa_erase_irq(&mdev->qp_table_wq, qidr);
}

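/* Arm the QP's reference counting and insert it into the lookup table. */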
static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	refcount_set(&qp->refcount, 1);
	init_completion(&qp->free);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		return mana_table_store_rc_qp(mdev, qp);
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return mana_table_store_ud_qp(mdev, qp);
	default:
		ibdev_dbg(&mdev->ib_dev, "Unknown QP type for storing in mana table, %d\n",
			  qp->ibqp.qp_type);
	}

	return -EINVAL;
}

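/*
 * Remove a QP from the lookup table, drop the table's reference, and
 * wait until all remaining references have been released.
 */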
static void mana_table_remove_qp(struct mana_ib_dev *mdev,
				 struct mana_ib_qp *qp)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		mana_table_remove_rc_qp(mdev, qp);
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		mana_table_remove_ud_qp(mdev, qp);
		break;
	default:
		ibdev_dbg(&mdev->ib_dev, "Unknown QP type for removing from mana table, %d\n",
			  qp->ibqp.qp_type);
		return;
	}
	mana_put_qp_ref(qp);
	wait_for_completion(&qp->free);
}

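/*
 * Create an RC QP from user-provided queue buffers. The FMR send queue
 * is skipped for user-level QPs; the responder receive queue id serves
 * as the QP number.
 */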
static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_create_rc_qp_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct mana_ib_create_rc_qp ucmd = {};
	int i, err, j;
	u64 flags = 0;
	u32 doorbell;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, ibucontext);
	doorbell = mana_ucontext->doorbell;
	flags = MANA_RC_FLAG_NO_FMR;
	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy from udata, %d\n", err);
		return err;
	}

	for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
		/* skip FMR for user-level RC QPs */
		if (i == MANA_RC_SEND_QUEUE_FMR) {
			qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;
			qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
			continue;
		}
		err = mana_ib_create_queue(mdev, ucmd.queue_buf[j], ucmd.queue_size[j],
					   &qp->rc_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n", i, err);
			goto destroy_queues;
		}
		j++;
	}

	err = mana_ib_gd_create_rc_qp(mdev, qp, attr, doorbell, flags);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp, err %d\n", err);
		goto destroy_queues;
	}
	qp->ibqp.qp_num = qp->rc_qp.queues[MANA_RC_RECV_QUEUE_RESPONDER].id;
	qp->port = attr->port_num;

	if (udata) {
		for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
			if (i == MANA_RC_SEND_QUEUE_FMR)
				continue;
			resp.queue_id[j] = qp->rc_qp.queues[i].id;
			j++;
		}
		err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
			goto destroy_qp;
		}
	}

	err = mana_table_store_qp(mdev, qp);
	if (err)
		goto destroy_qp;

	return 0;

destroy_qp:
	mana_ib_gd_destroy_rc_qp(mdev, qp);
destroy_queues:
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
	return err;
}

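/*
 * Link a QP into the send/recv QP lists of its CQs so that CQ
 * processing can iterate over the QPs attached to each CQ.
 */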
static void mana_add_qp_to_cqs(struct mana_ib_qp *qp)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	unsigned long flags;

	spin_lock_irqsave(&send_cq->cq_lock, flags);
	list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
	spin_unlock_irqrestore(&send_cq->cq_lock, flags);

	spin_lock_irqsave(&recv_cq->cq_lock, flags);
	list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
	spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
}

static void mana_remove_qp_from_cqs(struct mana_ib_qp *qp)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	unsigned long flags;

	spin_lock_irqsave(&send_cq->cq_lock, flags);
	list_del(&qp->cq_send_list);
	spin_unlock_irqrestore(&send_cq->cq_lock, flags);

	spin_lock_irqsave(&recv_cq->cq_lock, flags);
	list_del(&qp->cq_recv_list);
	spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
}

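/*
 * Create a kernel UD/GSI QP: hardware send/recv queues plus software
 * shadow queues that track work requests between posting and completion.
 */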
static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	u32 doorbell, queue_size;
	int i, err;

	if (udata) {
		ibdev_dbg(&mdev->ib_dev, "User-level UD QPs are not supported\n");
		return -EOPNOTSUPP;
	}

	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i) {
		queue_size = mana_ib_queue_size(attr, i);
		err = mana_ib_create_kernel_queue(mdev, queue_size, mana_ib_queue_type(attr, i),
						  &qp->ud_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n",
				  i, err);
			goto destroy_queues;
		}
	}
	doorbell = mdev->gdma_dev->doorbell;

	err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
				  sizeof(struct ud_rq_shadow_wqe));
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create shadow rq err %d\n", err);
		goto destroy_queues;
	}
	err = create_shadow_queue(&qp->shadow_sq, attr->cap.max_send_wr,
				  sizeof(struct ud_sq_shadow_wqe));
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create shadow sq err %d\n", err);
		goto destroy_shadow_queues;
	}

	err = mana_ib_gd_create_ud_qp(mdev, qp, attr, doorbell, attr->qp_type);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create ud qp, err %d\n", err);
		goto destroy_shadow_queues;
	}
	qp->ibqp.qp_num = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
	qp->port = attr->port_num;

	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
		qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;

	err = mana_table_store_qp(mdev, qp);
	if (err)
		goto destroy_qp;

	mana_add_qp_to_cqs(qp);

	return 0;

destroy_qp:
	mana_ib_gd_destroy_ud_qp(mdev, qp);
destroy_shadow_queues:
	destroy_shadow_queue(&qp->shadow_rq);
	destroy_shadow_queue(&qp->shadow_sq);
destroy_queues:
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
	return err;
}

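/* Verb entry point: dispatch QP creation by QP type. */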
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	case IB_QPT_RC:
		return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return mana_ib_create_ud_qp(ibqp, ibqp->pd, attr, udata);
	default:
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EINVAL;
}

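/*
 * Send a MANA_IB_SET_QP_STATE request carrying the modify-QP
 * attributes. For IB_QP_AV, the address handle is translated into
 * RoCE v2 (MAC/GID/UDP) addressing for the hardware.
 */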
static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibqp->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_rnic_set_qp_state_resp resp = {};
	struct mana_rnic_set_qp_state_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));

	req.hdr.req.msg_version = GDMA_MESSAGE_V3;
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.qp_handle = qp->qp_handle;
	req.qp_state = attr->qp_state;
	req.attr_mask = attr_mask;
	req.path_mtu = attr->path_mtu;
	req.rq_psn = attr->rq_psn;
	req.sq_psn = attr->sq_psn;
	req.dest_qpn = attr->dest_qp_num;
	req.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	req.retry_cnt = attr->retry_cnt;
	req.rnr_retry = attr->rnr_retry;
	req.min_rnr_timer = attr->min_rnr_timer;
	req.rate_limit = attr->rate_limit;
	req.qkey = attr->qkey;
	req.local_ack_timeout = attr->timeout;
	req.qp_access_flags = attr->qp_access_flags;
	req.max_rd_atomic = attr->max_rd_atomic;

	if (attr_mask & IB_QP_AV) {
		ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
		if (!ndev) {
			ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
				  ibqp->port, ibqp->qp_num);
			return -EINVAL;
		}
		mpc = netdev_priv(ndev);
		copy_in_reverse(req.ah_attr.src_mac, mpc->mac_addr, ETH_ALEN);
		copy_in_reverse(req.ah_attr.dest_mac, attr->ah_attr.roce.dmac, ETH_ALEN);
		copy_in_reverse(req.ah_attr.src_addr, attr->ah_attr.grh.sgid_attr->gid.raw,
				sizeof(union ib_gid));
		copy_in_reverse(req.ah_attr.dest_addr, attr->ah_attr.grh.dgid.raw,
				sizeof(union ib_gid));
		if (rdma_gid_attr_network_type(attr->ah_attr.grh.sgid_attr) == RDMA_NETWORK_IPV4) {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV4;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV4;
		} else {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV6;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV6;
		}
		req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
		req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
							  ibqp->qp_num, attr->dest_qp_num);
		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
		req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
		req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to modify qp, err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	switch (ibqp->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return mana_ib_gd_modify_qp(ibqp, attr, attr_mask, udata);
	default:
		ibdev_dbg(ibqp->device, "Modify QP type %u not supported\n", ibqp->qp_type);
		return -EOPNOTSUPP;
	}
}

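/* Destroy the hardware WQ objects created for each entry of the RSS
 * indirection table.
 */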
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

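/* Tear down a RAW_PACKET send QP and release its vPort. */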
static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

	mana_ib_destroy_queue(mdev, &qp->raw_sq);

	mana_ib_uncfg_vport(mdev, pd, qp->port);

	return 0;
}

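/* Tear down an RC QP: lookup-table removal, hardware QP destruction,
 * then the queue buffers.
 */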
static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	int i;

	mana_table_remove_qp(mdev, qp);

	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_rc_qp(mdev, qp);
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);

	return 0;
}

static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	int i;

	mana_remove_qp_from_cqs(qp);
	mana_table_remove_qp(mdev, qp);

	destroy_shadow_queue(&qp->shadow_rq);
	destroy_shadow_queue(&qp->shadow_sq);

	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_ud_qp(mdev, qp);
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
		mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);

	return 0;
}

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);
	case IB_QPT_RC:
		return mana_ib_destroy_rc_qp(qp, udata);
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return mana_ib_destroy_ud_qp(qp, udata);
	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -ENOENT;
}