xref: /linux/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c (revision 0aea30a07ec6b50de0fc5f5b2ec34a68ead86b61)
/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX	0

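/*
 * Map an ib_qp_state value to the short name used in the QP group
 * dumps below.
 */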
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}

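/*
 * usnic_ib_qp_grp_dump_hdr() and usnic_ib_qp_grp_dump_rows() form a
 * header/row pair for dumping QP group state as tab-separated columns;
 * the two column layouts must stay in sync.
 */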
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}

int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;

	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the chunk of RQs: qp indices are RQ indices.
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}

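/*
 * Enable each RQ backing this QP group on the forwarding device.  On
 * failure, roll back the queues already enabled so the group is left
 * fully disabled.  Caller must hold qp_grp->lock.
 */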
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}

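/*
 * Disable each RQ backing this QP group.  Unlike enable_qp_grp(), keep
 * going on error and return the status of the last queue that failed.
 * Caller must hold qp_grp->lock.
 */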
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
			PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}

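/*
 * Fill in a filter action that steers matching packets to this QP
 * group's default RQ (DFLT_RQ_IDX) on its vNIC.
 */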
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}

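/*
 * Create a flow for the usnic custom RoCE transport: reserve the
 * requested port, install a usnic filter for it, and wrap the
 * forwarding flow in a qp_grp_flow handle.  Undone by
 * release_roce_custom_flow().
 */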
static struct usnic_ib_qp_grp_flow *
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle; GFP_ATOMIC as we may hold qp_grp->lock */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}

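/*
 * Create a flow for the IPv4 UDP transport: take a reference on the
 * caller-supplied socket, check that it really is a UDP socket, and
 * install a filter for its address and port.  Undone by
 * release_udp_flow().
 */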
static struct usnic_ib_qp_grp_flow *
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP\n", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow; GFP_ATOMIC as we may hold qp_grp->lock */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}

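/*
 * Dispatch flow creation by transport type and, on success, link the
 * new flow into the QP group's flow list and expose it in debugfs.
 */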
static struct usnic_ib_qp_grp_flow *
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}

static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;

	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}

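/*
 * Transition a QP group to @new_state, performing the side effects the
 * usnic state machine attaches to each edge:
 *
 *   RESET -> RESET:  no-op
 *   RESET -> INIT:   optionally add a flow from @data (a transport spec)
 *   INIT  -> INIT:   add another flow from @data (mandatory here)
 *   INIT  -> RTR:    enable the group's queues
 *   RTR   -> RTS:    no-op for now
 *   RTR/RTS -> INIT: disable the queues
 *   INIT/RTR/RTS/ERR -> RESET: disable queues as needed, drop all flows
 *   RESET/INIT/RTR/RTS -> ERR: tear down and raise IB_EVENT_QP_FATAL
 *
 * Any other transition fails with -EINVAL.
 */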
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s\n",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s\n",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}

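/*
 * Claim the vNIC resources listed in @res_spec and return them as a
 * NULL-terminated array of chunks, releasing everything acquired so
 * far on failure.  Freed with free_qp_grp_res().
 */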
static struct usnic_vnic_res_chunk **
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kcalloc(res_lst_sz + 1, sizeof(*res_chunk_list),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
					PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}

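/*
 * Bind a QP group to a VF.  The first group on a VF attaches the VF's
 * PCI device to the PD's uiom domain and records the PD; subsequent
 * groups only bump the refcount.  qp_grp_and_vf_unbind() reverses
 * this, detaching from the domain when the last group goes away.
 */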
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}

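/*
 * Derive the QP group id (which also becomes the IB QP number) from
 * the group's default flow: the reserved port for the custom RoCE
 * transport, or the socket's bound port for UDP.
 */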
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short to int cast works for little
		 * and big endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}

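/*
 * Initialize a QP group: check @res_spec against the transport's
 * minimum spec, claim vNIC resources, bind to @vf/@pd, create the
 * initial flow from @transport_spec, and publish the resulting QPN
 * through sysfs.  Caller must hold vf->lock.
 */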
int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp_grp,
			   struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			   struct usnic_ib_pd *pd,
			   struct usnic_vnic_res_spec *res_spec,
			   struct usnic_transport_spec *transport_spec)
{
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return err;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list))
		return qp_grp->res_chunk_list ?
				     PTR_ERR(qp_grp->res_chunk_list) :
				     -ENOMEM;

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return 0;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
	return err;
}

void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
}

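/*
 * Return this QP group's resource chunk of the given type, or
 * ERR_PTR(-EINVAL) if the group owns no resources of that type.
 */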
struct usnic_vnic_res_chunk *
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}
756