/* xref: /linux/drivers/infiniband/hw/mana/qp.c (revision e7d759f31ca295d589f7420719c311870bb3166f) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

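/*
 * Program RX steering for the IB device's vPort: build a
 * MANA_CONFIG_VPORT_RX request carrying a full-sized indirection table
 * and an RSS hash key, then issue it to the hardware over GDMA.
 */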
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	mana_handle_t *req_indir_tab;
	struct gdma_context *gc;
	struct gdma_dev *mdev;
	u32 req_buf_size;
	int i, err;

	gc = dev->gdma_dev->gdma_context;
	mdev = &gc->mana;

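	/*
	 * The indirection table is carried in the same buffer, immediately
	 * after the request structure; indir_tab_offset below tells the
	 * hardware where it starts.
	 */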
	req_buf_size =
		sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = mdev->dev_id;

	/* Enable RSS if the indirection table has more than one entry */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
	req->indir_tab_offset = sizeof(*req);
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;

	req_indir_tab = (mana_handle_t *)(req + 1);
	/* The indirection table passed to the hardware must always have
	 * MANA_INDIRECT_TABLE_SIZE entries. If the verbs-supplied table is
	 * smaller, repeat its entries until the full table is populated.
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
		req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req_indir_tab[i]);
	}

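	/* Use the caller's RSS hash key if one was given, otherwise fall
	 * back to a randomly generated key.
	 */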
	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}

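/*
 * Create an RSS "QP": no send queue is created here. For every entry in the
 * user's receive WQ indirection table, a GDMA RQ/CQ pair is created and
 * bound to an EQ, the resulting IDs are reported back to userspace, and
 * finally vPort steering is programmed so received traffic is spread
 * across the WQs.
 */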
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	struct gdma_queue **gdma_cq_allocated;
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	struct gdma_queue *gdma_cq;
	unsigned int ind_tbl_size;
	struct mana_context *mc;
	struct net_device *ndev;
	struct gdma_context *gc;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	struct gdma_dev *gd;
	struct mana_eq *eq;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	gc = mdev->gdma_dev->gdma_context;
	gd = &gc->mana;
	mc = gd->driver_data;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports start with 1, MANA Ethernet ports start with 0 */
	port = ucmd.port;
	if (port < 1 || port > mc->num_ports) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	ndev = mc->ports[port - 1];
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
				    GFP_KERNEL);
	if (!gdma_cq_allocated) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

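	/* Create one GDMA RQ/CQ pair per indirection table entry. Each CQ is
	 * attached to one of the Ethernet driver's EQs (chosen by completion
	 * vector) and registered in the GDMA CQ table so its completions can
	 * be dispatched to mana_ib_cq_handler().
	 */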
	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		eq = &mc->eqs[cq->comp_vector % gc->max_num_queues];
		cq_spec.attached_eq = eq->eq->id;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret) {
			/* Do cleanup starting with index i-1 */
			i--;
			goto fail;
		}

		/* The GDMA regions are now owned by the WQ object */
		wq->gdma_region = GDMA_INVALID_DMA_REGION;
		cq->gdma_region = GDMA_INVALID_DMA_REGION;

		wq->id = wq_spec.queue_index;
		cq->id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
			  ret, wq->rx_object, wq->id, cq->id);

		resp.entries[i].cqid = cq->id;
		resp.entries[i].wqid = wq->id;

		mana_ind_table[i] = wq->rx_object;

		/* Create CQ table entry */
		WARN_ON(gc->cq_table[cq->id]);
		gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
		if (!gdma_cq) {
			ret = -ENOMEM;
			goto fail;
		}
		gdma_cq_allocated[i] = gdma_cq;

		gdma_cq->cq.context = cq;
		gdma_cq->type = GDMA_CQ;
		gdma_cq->cq.callback = mana_ib_cq_handler;
		gdma_cq->id = cq->id;
		gc->cq_table[cq->id] = gdma_cq;
	}
	resp.num_entries = i;

	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(gdma_cq_allocated);
	kfree(mana_ind_table);

	return 0;

fail:
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		ibcq = ibwq->cq;
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		gc->cq_table[cq->id] = NULL;
		kfree(gdma_cq_allocated[i]);

		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(gdma_cq_allocated);
	kfree(mana_ind_table);

	return ret;
}

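/*
 * Create a raw-packet QP with a user-space send queue: bind the PD and the
 * user context's doorbell to the vPort, pin the user's SQ buffer, create a
 * GDMA region and a GDMA_SQ work queue object on the Ethernet port handle,
 * and hook the send CQ into the GDMA CQ table before reporting the queue
 * IDs back to userspace.
 */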
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
	struct mana_ib_create_qp_resp resp = {};
	struct mana_ib_create_qp ucmd = {};
	struct gdma_queue *gdma_cq = NULL;
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct ib_umem *umem;
	struct mana_eq *eq;
	int eq_vec;
	u32 port;
	int err;

	mc = gd->driver_data;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	/* IB ports start with 1, MANA Ethernet ports start with 0 */
	port = ucmd.port;
	if (port < 1 || port > mc->num_ports)
		return -EINVAL;

	if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	ndev = mc->ports[port - 1];
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to get umem for create qp-raw, err %d\n",
			  err);
		goto err_free_vport;
	}
	qp->sq_umem = umem;

	err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
					   &qp->sq_gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create dma region for create qp-raw, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(&mdev->ib_dev,
		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
		  err, qp->sq_gdma_region);

	/* Create a WQ on the same port handle used by the Ethernet */
	wq_spec.gdma_region = qp->sq_gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
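	/* Attach the send CQ to one of the Ethernet driver's EQs, selected
	 * by the completion vector.
	 */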
	eq_vec = send_cq->comp_vector % gd->gdma_context->max_num_queues;
	eq = &mc->eqs[eq_vec];
	cq_spec.attached_eq = eq->eq->id;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->tx_object);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_dma_region;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;

	qp->sq_id = wq_spec.queue_index;
	send_cq->id = cq_spec.queue_index;

	/* Create CQ table entry */
	WARN_ON(gd->gdma_context->cq_table[send_cq->id]);
	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
	if (!gdma_cq) {
		err = -ENOMEM;
		goto err_destroy_wq_obj;
	}

	gdma_cq->cq.context = send_cq;
	gdma_cq->type = GDMA_CQ;
	gdma_cq->cq.callback = mana_ib_cq_handler;
	gdma_cq->id = send_cq->id;
	gd->gdma_context->cq_table[send_cq->id] = gdma_cq;

	ibdev_dbg(&mdev->ib_dev,
		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
		  qp->tx_object, qp->sq_id, send_cq->id);

	resp.sqid = qp->sq_id;
	resp.cqid = send_cq->id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_release_gdma_cq;
	}

	return 0;

err_release_gdma_cq:
	kfree(gdma_cq);
	gd->gdma_context->cq_table[send_cq->id] = NULL;

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

err_destroy_dma_region:
	mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);

err_release_umem:
	ib_umem_release(umem);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port - 1);

	return err;
}

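/*
 * Verbs entry point for QP creation. Only IB_QPT_RAW_PACKET is supported:
 * a QP created with a receive WQ indirection table becomes an RSS QP,
 * anything else becomes a raw-packet QP with a user-space send queue.
 */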
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	default:
		/* Creating QP types other than IB_QPT_RAW_PACKET is not supported */
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EINVAL;
}
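
/*
 * For illustration only: a minimal user-space sketch of how this create path
 * is typically reached through libibverbs. Raw-packet QPs require
 * CAP_NET_RAW; names such as "pd" and "cq" and the queue depths below are
 * placeholders, and error handling is omitted.
 *
 *	struct ibv_qp_init_attr attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = { .max_send_wr = 64, .max_send_sge = 1 },
 *		.qp_type = IBV_QPT_RAW_PACKET,
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
 *
 * The RSS variant instead passes an indirection table built from
 * ibv_create_wq() queues via ibv_create_rwq_ind_table() and
 * ibv_create_qp_ex() with a Toeplitz rx_hash_conf.
 */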

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	/* modify_qp is not supported by this version of the driver */
	return -EOPNOTSUPP;
}

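/*
 * Tear down an RSS QP: destroy the GDMA RQ object behind every entry of the
 * receive WQ indirection table on the port the QP was bound to.
 */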
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	mc = gd->driver_data;
	ndev = mc->ports[qp->port - 1];
	mpc = netdev_priv(ndev);

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

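/*
 * Tear down a raw-packet QP: destroy the GDMA SQ object, release the DMA
 * region and umem backing the send queue, and unconfigure the vPort.
 */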
static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct mana_context *mc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	mc = gd->driver_data;
	ndev = mc->ports[qp->port - 1];
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

	if (qp->sq_umem) {
		mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
		ib_umem_release(qp->sq_umem);
	}

	mana_ib_uncfg_vport(mdev, pd, qp->port - 1);

	return 0;
}

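/*
 * Verbs entry point for QP destruction; dispatches to the RSS or raw-packet
 * teardown path based on how the QP was created.
 */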
int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);

	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -ENOENT;
}
573