xref: /freebsd/sys/dev/irdma/irdma_kcompat.c (revision 6137b5f7b8c183ee8806d79b3f1d8e5e3ddb3df3)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2018 - 2023 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include "irdma_main.h"
36 
37 #define IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
38 
39 static u16 kc_rdma_flow_label_to_udp_sport(u32 fl) {
40 	u32 fl_low = fl & 0x03FFF;
41 	u32 fl_high = fl & 0xFC000;
42 
43 	fl_low ^= fl_high >> 14;
44 
45 	return (u16)(fl_low | IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN);
46 }
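
/*
 * Illustrative example for kc_rdma_flow_label_to_udp_sport() above: the
 * 20-bit flow label is folded into 14 bits and OR'ed with 0xC000 so the
 * result always lands in the RoCEv2 UDP source port range.  For fl = 0x12345:
 *   fl_low  = 0x12345 & 0x03FFF = 0x2345
 *   fl_high = 0x12345 & 0xFC000 = 0x10000, fl_high >> 14 = 0x4
 *   sport   = (0x2345 ^ 0x4) | 0xC000 = 0xE341
 * kc_rdma_calc_flow_label() below derives a flow label from the local and
 * remote QP numbers when none is supplied by the caller.
 */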
47 
48 #define IRDMA_GRH_FLOWLABEL_MASK (0x000FFFFF)
49 
50 static u32 kc_rdma_calc_flow_label(u32 lqpn, u32 rqpn) {
51 	u64 fl = (u64)lqpn * rqpn;
52 
53 	fl ^= fl >> 20;
54 	fl ^= fl >> 40;
55 
56 	return (u32)(fl & IRDMA_GRH_FLOWLABEL_MASK);
57 }
58 
59 u16
60 kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
61 {
62 	if (!fl)
63 		fl = kc_rdma_calc_flow_label(lqpn, rqpn);
64 	return kc_rdma_flow_label_to_udp_sport(fl);
65 }
66 
67 void
68 irdma_get_dev_fw_str(struct ib_device *dev,
69 		     char *str,
70 		     size_t str_len)
71 {
72 	struct irdma_device *iwdev = to_iwdev(dev);
73 
74 	snprintf(str, str_len, "%u.%u",
75 		 irdma_fw_major_ver(&iwdev->rf->sc_dev),
76 		 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
77 }
78 
79 int
80 irdma_add_gid(struct ib_device *device,
81 	      u8 port_num,
82 	      unsigned int index,
83 	      const union ib_gid *gid,
84 	      const struct ib_gid_attr *attr,
85 	      void **context)
86 {
87 	return 0;
88 }
89 
90 int
91 irdma_del_gid(struct ib_device *device,
92 	      u8 port_num,
93 	      unsigned int index,
94 	      void **context)
95 {
96 	return 0;
97 }
98 
99 #if __FreeBSD_version >= 1400026
100 /**
101  * irdma_alloc_mr - register stag for fast memory registration
102  * @pd: ibpd pointer
103  * @mr_type: memory type for stag registration
104  * @max_num_sg: max number of pages
105  * @udata: user data
106  */
107 struct ib_mr *
108 irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
109 	       u32 max_num_sg, struct ib_udata *udata)
110 {
111 #else
112 /**
113  * irdma_alloc_mr - register stag for fast memory registration
114  * @pd: ibpd pointer
115  * @mr_type: memory type for stag registration
116  * @max_num_sg: max number of pages
117  */
118 struct ib_mr *
119 irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
120 	       u32 max_num_sg)
121 {
122 #endif
123 	struct irdma_device *iwdev = to_iwdev(pd->device);
124 	struct irdma_pble_alloc *palloc;
125 	struct irdma_pbl *iwpbl;
126 	struct irdma_mr *iwmr;
127 	int status;
128 	u32 stag;
129 	int err_code = -ENOMEM;
130 
131 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
132 	if (!iwmr)
133 		return ERR_PTR(-ENOMEM);
134 
135 	stag = irdma_create_stag(iwdev);
136 	if (!stag) {
137 		err_code = -ENOMEM;
138 		goto err;
139 	}
140 
141 	iwmr->stag = stag;
142 	iwmr->ibmr.rkey = stag;
143 	iwmr->ibmr.lkey = stag;
144 	iwmr->ibmr.pd = pd;
145 	iwmr->ibmr.device = pd->device;
146 	iwpbl = &iwmr->iwpbl;
147 	iwpbl->iwmr = iwmr;
148 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
149 	palloc = &iwpbl->pble_alloc;
150 	iwmr->page_cnt = max_num_sg;
151 	/* Assume system PAGE_SIZE as the sg page sizes are unknown. */
152 	iwmr->len = max_num_sg * PAGE_SIZE;
153 	status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
154 				false);
155 	if (status)
156 		goto err_get_pble;
157 
158 	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
159 	if (err_code)
160 		goto err_alloc_stag;
161 
162 	iwpbl->pbl_allocated = true;
163 
164 	return &iwmr->ibmr;
165 err_alloc_stag:
166 	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
167 err_get_pble:
168 	irdma_free_stag(iwdev, stag);
169 err:
170 	kfree(iwmr);
171 
172 	return ERR_PTR(err_code);
173 }
174 
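/*
 * Minimum udata sizes accepted for the alloc_ucontext ABI; offsetofend()
 * of the last required field gives the smallest layout the driver accepts.
 */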
175 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
176 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
177 #if __FreeBSD_version >= 1400026
178 /**
179  * irdma_alloc_ucontext - Allocate the user context data structure
180  * @uctx: context
181  * @udata: user data
182  *
183  * This keeps track of all objects associated with a particular
184  * user-mode client.
185  */
186 int
187 irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
188 {
189 	struct ib_device *ibdev = uctx->device;
190 	struct irdma_device *iwdev = to_iwdev(ibdev);
191 	struct irdma_alloc_ucontext_req req = {0};
192 	struct irdma_alloc_ucontext_resp uresp = {0};
193 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
194 	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
195 
196 	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
197 	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
198 		return -EINVAL;
199 
200 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
201 		return -EINVAL;
202 
203 	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
204 		goto ver_error;
205 
206 	ucontext->iwdev = iwdev;
207 	ucontext->abi_ver = req.userspace_ver;
208 
209 	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
210 		ucontext->use_raw_attrs = true;
211 
212 	/* GEN_1 support for libi40iw */
213 	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
214 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
215 			return -EOPNOTSUPP;
216 
217 		ucontext->legacy_mode = true;
218 		uresp.max_qps = iwdev->rf->max_qp;
219 		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
220 		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
221 		uresp.kernel_ver = req.userspace_ver;
222 		if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
223 			return -EFAULT;
224 	} else {
225 		u64 bar_off;
226 
227 		uresp.kernel_ver = IRDMA_ABI_VER;
228 		uresp.feature_flags = uk_attrs->feature_flags;
229 		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
230 		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
231 		uresp.max_hw_inline = uk_attrs->max_hw_inline;
232 		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
233 		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
234 		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
235 		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
236 		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
237 		uresp.hw_rev = uk_attrs->hw_rev;
238 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
239 
240 		bar_off =
241 		    (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
242 		ucontext->db_mmap_entry =
243 		    irdma_user_mmap_entry_insert(ucontext, bar_off,
244 						 IRDMA_MMAP_IO_NC,
245 						 &uresp.db_mmap_key);
246 		if (!ucontext->db_mmap_entry) {
247 			return -ENOMEM;
248 		}
249 
250 		if (ib_copy_to_udata(udata, &uresp,
251 				     min(sizeof(uresp), udata->outlen))) {
252 			rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
253 			return -EFAULT;
254 		}
255 	}
256 
257 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
258 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
259 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
260 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
261 	INIT_LIST_HEAD(&ucontext->vma_list);
262 	mutex_init(&ucontext->vma_list_mutex);
263 
264 	return 0;
265 
266 ver_error:
267 	irdma_dev_err(&iwdev->ibdev,
268 		      "Invalid userspace driver version detected. Detected version %d, should be %d\n",
269 		      req.userspace_ver, IRDMA_ABI_VER);
270 	return -EINVAL;
271 }
272 #endif
273 
274 #if __FreeBSD_version < 1400026
275 /**
276  * irdma_alloc_ucontext - Allocate the user context data structure
277  * @ibdev: ib device pointer
278  * @udata: user data
279  *
280  * This keeps track of all objects associated with a particular
281  * user-mode client.
282  */
283 struct ib_ucontext *
284 irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
285 {
286 	struct irdma_device *iwdev = to_iwdev(ibdev);
287 	struct irdma_alloc_ucontext_req req = {0};
288 	struct irdma_alloc_ucontext_resp uresp = {0};
289 	struct irdma_ucontext *ucontext;
290 	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
291 
292 	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
293 	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
294 		return ERR_PTR(-EINVAL);
295 
296 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
297 		return ERR_PTR(-EINVAL);
298 
299 	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
300 		goto ver_error;
301 
302 	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
303 	if (!ucontext)
304 		return ERR_PTR(-ENOMEM);
305 
306 	ucontext->iwdev = iwdev;
307 	ucontext->abi_ver = req.userspace_ver;
308 
309 	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
310 		ucontext->use_raw_attrs = true;
311 
312 	/* GEN_1 legacy support with libi40iw */
313 	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
314 		if (uk_attrs->hw_rev != IRDMA_GEN_1) {
315 			kfree(ucontext);
316 			return ERR_PTR(-EOPNOTSUPP);
317 		}
318 
319 		ucontext->legacy_mode = true;
320 		uresp.max_qps = iwdev->rf->max_qp;
321 		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
322 		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
323 		uresp.kernel_ver = req.userspace_ver;
324 		if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) {
325 			kfree(ucontext);
326 			return ERR_PTR(-EFAULT);
327 		}
328 	} else {
329 		u64 bar_off;
330 
331 		uresp.kernel_ver = IRDMA_ABI_VER;
332 		uresp.feature_flags = uk_attrs->feature_flags;
333 		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
334 		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
335 		uresp.max_hw_inline = uk_attrs->max_hw_inline;
336 		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
337 		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
338 		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
339 		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
340 		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
341 		uresp.hw_rev = uk_attrs->hw_rev;
342 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
343 
344 		bar_off =
345 		    (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
346 
347 		spin_lock_init(&ucontext->mmap_tbl_lock);
348 		ucontext->db_mmap_entry =
349 		    irdma_user_mmap_entry_add_hash(ucontext, bar_off,
350 						   IRDMA_MMAP_IO_NC,
351 						   &uresp.db_mmap_key);
352 		if (!ucontext->db_mmap_entry) {
353 			spin_lock_destroy(&ucontext->mmap_tbl_lock);
354 			kfree(ucontext);
355 			return ERR_PTR(-ENOMEM);
356 		}
357 
358 		if (ib_copy_to_udata(udata, &uresp,
359 				     min(sizeof(uresp), udata->outlen))) {
360 			irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
361 			spin_lock_destroy(&ucontext->mmap_tbl_lock);
362 			kfree(ucontext);
363 			return ERR_PTR(-EFAULT);
364 		}
365 	}
366 
367 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
368 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
369 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
370 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
371 	INIT_LIST_HEAD(&ucontext->vma_list);
372 	mutex_init(&ucontext->vma_list_mutex);
373 
374 	return &ucontext->ibucontext;
375 
376 ver_error:
377 	irdma_dev_err(&iwdev->ibdev,
378 		      "Invalid userspace driver version detected. Detected version %d, should be %d\n",
379 		      req.userspace_ver, IRDMA_ABI_VER);
380 	return ERR_PTR(-EINVAL);
381 }
382 #endif
383 
384 #if __FreeBSD_version >= 1400026
385 /**
386  * irdma_dealloc_ucontext - deallocate the user context data structure
387  * @context: user context created during alloc
388  */
389 void
390 irdma_dealloc_ucontext(struct ib_ucontext *context)
391 {
392 	struct irdma_ucontext *ucontext = to_ucontext(context);
393 
394 	rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
395 
396 	return;
397 }
398 #endif
399 
400 #if __FreeBSD_version < 1400026
401 /**
402  * irdma_dealloc_ucontext - deallocate the user context data structure
403  * @context: user context created during alloc
404  */
405 int
406 irdma_dealloc_ucontext(struct ib_ucontext *context)
407 {
408 	struct irdma_ucontext *ucontext = to_ucontext(context);
409 
410 	irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
411 	spin_lock_destroy(&ucontext->mmap_tbl_lock);
412 	kfree(ucontext);
413 
414 	return 0;
415 }
416 #endif
417 
418 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
419 #if __FreeBSD_version >= 1400026
420 /**
421  * irdma_alloc_pd - allocate protection domain
422  * @pd: protection domain
423  * @udata: user data
424  */
425 int
426 irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
427 {
428 	struct irdma_pd *iwpd = to_iwpd(pd);
429 	struct irdma_device *iwdev = to_iwdev(pd->device);
430 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
431 	struct irdma_pci_f *rf = iwdev->rf;
432 	struct irdma_alloc_pd_resp uresp = {0};
433 	struct irdma_sc_pd *sc_pd;
434 	u32 pd_id = 0;
435 	int err;
436 
437 	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
438 		return -EINVAL;
439 
440 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
441 			       &rf->next_pd);
442 	if (err)
443 		return err;
444 
445 	sc_pd = &iwpd->sc_pd;
446 	if (udata) {
447 		struct irdma_ucontext *ucontext =
448 		rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
449 
450 		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
451 		uresp.pd_id = pd_id;
452 		if (ib_copy_to_udata(udata, &uresp,
453 				     min(sizeof(uresp), udata->outlen))) {
454 			err = -EFAULT;
455 			goto error;
456 		}
457 	} else {
458 		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
459 	}
460 
461 	spin_lock_init(&iwpd->udqp_list_lock);
462 	INIT_LIST_HEAD(&iwpd->udqp_list);
463 
464 	return 0;
465 
466 error:
467 
468 	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
469 
470 	return err;
471 }
472 #endif
473 
474 #if __FreeBSD_version < 1400026
475 /**
476  * irdma_alloc_pd - allocate protection domain
477  * @ibdev: IB device
478  * @context: user context
479  * @udata: user data
480  */
481 struct ib_pd *
482 irdma_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata)
483 {
484 	struct irdma_pd *iwpd;
485 	struct irdma_device *iwdev = to_iwdev(ibdev);
486 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
487 	struct irdma_pci_f *rf = iwdev->rf;
488 	struct irdma_alloc_pd_resp uresp = {0};
489 	struct irdma_sc_pd *sc_pd;
490 	u32 pd_id = 0;
491 	int err;
492 
493 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
494 			       &rf->next_pd);
495 	if (err)
496 		return ERR_PTR(err);
497 
498 	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
499 	if (!iwpd) {
500 		err = -ENOMEM;
501 		goto free_res;
502 	}
503 
504 	sc_pd = &iwpd->sc_pd;
505 	if (udata) {
506 		struct irdma_ucontext *ucontext = to_ucontext(context);
507 
508 		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
509 		uresp.pd_id = pd_id;
510 		if (ib_copy_to_udata(udata, &uresp,
511 				     min(sizeof(uresp), udata->outlen))) {
512 			err = -EFAULT;
513 			goto error;
514 		}
515 	} else {
516 		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
517 	}
518 
519 	spin_lock_init(&iwpd->udqp_list_lock);
520 	INIT_LIST_HEAD(&iwpd->udqp_list);
521 
522 	return &iwpd->ibpd;
523 
524 error:
525 	kfree(iwpd);
526 free_res:
527 
528 	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
529 
530 	return ERR_PTR(err);
531 }
532 
533 #endif
534 
535 #if __FreeBSD_version >= 1400026
536 void
537 irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
538 {
539 	struct irdma_pd *iwpd = to_iwpd(ibpd);
540 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
541 
542 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
543 }
544 
545 #endif
546 
547 #if __FreeBSD_version < 1400026
548 int
549 irdma_dealloc_pd(struct ib_pd *ibpd)
550 {
551 	struct irdma_pd *iwpd = to_iwpd(ibpd);
552 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
553 
554 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
555 	kfree(iwpd);
556 	return 0;
557 }
558 #endif
559 
560 /**
561  * irdma_find_qp_update_qs - update QS handle for UD QPs
562  * @rf: RDMA PCI function
563  * @pd: protection domain object
564  * @user_pri: selected user priority
565  */
566 static void
567 irdma_find_qp_update_qs(struct irdma_pci_f *rf,
568 			struct irdma_pd *pd, u8 user_pri)
569 {
570 	struct irdma_qp *iwqp;
571 	struct list_head *tmp_node, *list_node;
572 	struct irdma_udqs_work *work;
573 	unsigned long flags;
574 	bool qs_change;
575 
576 	spin_lock_irqsave(&pd->udqp_list_lock, flags);
577 	list_for_each_safe(list_node, tmp_node, &pd->udqp_list) {
578 		qs_change = true;
579 		iwqp = list_entry(list_node, struct irdma_qp, ud_list_elem);
580 		irdma_qp_add_ref(&iwqp->ibqp);
581 		/* check if qs_handle needs to be changed */
582 		if (iwqp->sc_qp.qs_handle == iwqp->sc_qp.vsi->qos[user_pri].qs_handle) {
583 			if (iwqp->ctx_info.user_pri == user_pri) {
584 				/* qs_handle and user_pri don't change */
585 				irdma_qp_rem_ref(&iwqp->ibqp);
586 				continue;
587 			}
588 			qs_change = false;
589 		}
590 		/* perform qp qos change */
591 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
592 		if (!work) {
593 			irdma_qp_rem_ref(&iwqp->ibqp);
594 			spin_unlock_irqrestore(&pd->udqp_list_lock, flags);
595 			return;
596 		}
597 		work->iwqp = iwqp;
598 		work->user_prio = user_pri;
599 		work->qs_change = qs_change;
600 		INIT_WORK(&work->work, irdma_udqp_qs_worker);
601 		if (qs_change)
602 			irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND);
603 		queue_work(rf->iwdev->cleanup_wq, &work->work);
604 	}
605 	spin_unlock_irqrestore(&pd->udqp_list_lock, flags);
606 }
607 
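/**
 * irdma_fill_ah_info - fill AH address info from the GID sockaddrs
 * @vnet: vnet used for the IPv4 loopback check
 * @ah_info: AH info to fill in
 * @sgid_attr: source GID attributes
 * @sgid_addr: source GID converted to a sockaddr
 * @dgid_addr: destination GID converted to a sockaddr
 * @dmac: destination MAC, rewritten for multicast destinations
 * @net_type: RDMA network type of the destination GID
 */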
608 static void
609 irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
610 		   const struct ib_gid_attr *sgid_attr,
611 		   union irdma_sockaddr *sgid_addr,
612 		   union irdma_sockaddr *dgid_addr,
613 		   u8 *dmac, u8 net_type)
614 {
615 	if (net_type == RDMA_NETWORK_IPV4) {
616 		ah_info->ipv4_valid = true;
617 		ah_info->dest_ip_addr[0] =
618 		    ntohl(dgid_addr->saddr_in.sin_addr.s_addr);
619 		ah_info->src_ip_addr[0] =
620 		    ntohl(sgid_addr->saddr_in.sin_addr.s_addr);
621 		CURVNET_SET_QUIET(vnet);
622 		ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
623 						     ah_info->dest_ip_addr[0]);
624 		CURVNET_RESTORE();
625 		if (ipv4_is_multicast(dgid_addr->saddr_in.sin_addr.s_addr)) {
626 			irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
627 		}
628 	} else {
629 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
630 				    dgid_addr->saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
631 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
632 				    sgid_addr->saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
633 		ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
634 						     ah_info->dest_ip_addr);
635 		if (rdma_is_multicast_addr(&dgid_addr->saddr_in6.sin6_addr)) {
636 			irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac);
637 		}
638 	}
639 }
640 
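/* No per-netdev priority remapping is done here; the ToS-derived priority is used as-is. */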
641 static inline u8 irdma_roce_get_vlan_prio(if_t ndev, u8 prio)
642 {
643 	return prio;
644 }
645 
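/**
 * irdma_create_ah_vlan_tag - resolve VLAN tag and ARP entry for an AH
 * @iwdev: irdma device
 * @pd: protection domain of the AH
 * @ah_info: AH info to update with the VLAN tag and ARP index
 * @sgid_attr: source GID attributes used to detect a VLAN netdev
 * @dmac: destination MAC address to resolve via ARP
 *
 * Returns 0 on success or -EINVAL if no ARP entry can be added for the
 * destination address.
 */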
646 static int
647 irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
648 			 struct irdma_pd *pd,
649 			 struct irdma_ah_info *ah_info,
650 			 const struct ib_gid_attr *sgid_attr,
651 			 u8 *dmac)
652 {
653 	u16 vlan_prio;
654 
655 	if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev))
656 		ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
657 	else
658 		ah_info->vlan_tag = VLAN_N_VID;
659 
660 	ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, dmac);
661 
662 	if (ah_info->dst_arpindex == -1)
663 		return -EINVAL;
664 
665 	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
666 		ah_info->vlan_tag = 0;
667 
668 	if (ah_info->vlan_tag < VLAN_N_VID) {
669 		ah_info->insert_vlan_tag = true;
670 		vlan_prio = (u16)irdma_roce_get_vlan_prio(sgid_attr->ndev,
671 							  rt_tos2priority(ah_info->tc_tos));
672 		ah_info->vlan_tag |= vlan_prio << VLAN_PRIO_SHIFT;
673 		irdma_find_qp_update_qs(iwdev->rf, pd, vlan_prio);
674 	}
675 	if (iwdev->roce_dcqcn_en) {
676 		ah_info->tc_tos &= ~ECN_CODE_PT_MASK;
677 		ah_info->tc_tos |= ECN_CODE_PT_VAL;
678 	}
679 
680 	return 0;
681 }
682 
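/**
 * irdma_create_ah_wait - wait for AH creation to complete
 * @rf: RDMA PCI function
 * @sc_ah: address handle being created
 * @sleep: true if the caller may sleep waiting for the CQP completion
 *
 * For the non-sleepable path, poll the CCQ until the AH is reported
 * valid or the CQP completion wait time is exhausted.
 */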
683 static int
684 irdma_create_ah_wait(struct irdma_pci_f *rf,
685 		     struct irdma_sc_ah *sc_ah, bool sleep)
686 {
687 	if (!sleep) {
688 		int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
689 		CQP_TIMEOUT_THRESHOLD;
690 
691 		do {
692 			irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
693 			mdelay(1);
694 		} while (!sc_ah->ah_info.ah_valid && --cnt);
695 
696 		if (!cnt)
697 			return -ETIMEDOUT;
698 	}
699 	return 0;
700 }
701 
702 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
703 
704 #if __FreeBSD_version >= 1400026
705 /**
706  * irdma_create_ah - create address handle
707  * @ib_ah: ptr to AH
708  * @attr: address handle attributes
709  * @flags: AH creation flags, e.g. RDMA_CREATE_AH_SLEEPABLE
710  * @udata: user data
711  *
712  * returns 0 on success, error otherwise
713  */
714 int
715 irdma_create_ah(struct ib_ah *ib_ah,
716 		struct ib_ah_attr *attr, u32 flags,
717 		struct ib_udata *udata)
718 {
719 	struct irdma_pd *pd = to_iwpd(ib_ah->pd);
720 	struct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah);
721 	struct irdma_device *iwdev = to_iwdev(ib_ah->pd->device);
722 	union ib_gid sgid;
723 	struct ib_gid_attr sgid_attr;
724 	struct irdma_pci_f *rf = iwdev->rf;
725 	struct irdma_sc_ah *sc_ah;
726 	u32 ah_id = 0;
727 	struct irdma_ah_info *ah_info;
728 	struct irdma_create_ah_resp uresp = {};
729 	union irdma_sockaddr sgid_addr, dgid_addr;
730 	int err;
731 	u8 dmac[ETHER_ADDR_LEN];
732 	bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0;
733 
734 	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
735 		return -EINVAL;
736 
737 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
738 			       rf->max_ah, &ah_id, &rf->next_ah);
739 
740 	if (err)
741 		return err;
742 
743 	ah->pd = pd;
744 	sc_ah = &ah->sc_ah;
745 	sc_ah->ah_info.ah_idx = ah_id;
746 	sc_ah->ah_info.vsi = &iwdev->vsi;
747 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
748 	ah->sgid_index = attr->grh.sgid_index;
749 	memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
750 	rcu_read_lock();
751 	err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
752 				attr->grh.sgid_index, &sgid, &sgid_attr);
753 	rcu_read_unlock();
754 	if (err) {
755 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
756 			    "GID lookup at idx=%d with port=%d failed\n",
757 			    attr->grh.sgid_index, attr->port_num);
758 		err = -EINVAL;
759 		goto err_gid_l2;
760 	}
761 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
762 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
763 	ah->av.attrs = *attr;
764 	ah->av.net_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
765 
766 	if (sgid_attr.ndev)
767 		dev_put(sgid_attr.ndev);
768 
769 	ah_info = &sc_ah->ah_info;
770 	ah_info->ah_idx = ah_id;
771 	ah_info->pd_idx = pd->sc_pd.pd_id;
772 	ether_addr_copy(ah_info->mac_addr, if_getlladdr(iwdev->netdev));
773 
774 	if (attr->ah_flags & IB_AH_GRH) {
775 		ah_info->flow_label = attr->grh.flow_label;
776 		ah_info->hop_ttl = attr->grh.hop_limit;
777 		ah_info->tc_tos = attr->grh.traffic_class;
778 	}
779 
780 	ether_addr_copy(dmac, attr->dmac);
781 
782 	irdma_fill_ah_info(if_getvnet(iwdev->netdev), ah_info, &sgid_attr, &sgid_addr, &dgid_addr,
783 			   dmac, ah->av.net_type);
784 
785 	err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac);
786 	if (err)
787 		goto err_gid_l2;
788 
789 	err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
790 			      sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
791 	if (err) {
792 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP-OP Create AH fail");
793 		goto err_gid_l2;
794 	}
795 
796 	err = irdma_create_ah_wait(rf, sc_ah, sleep);
797 	if (err) {
798 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
799 		goto err_gid_l2;
800 	}
801 
802 	if (udata) {
803 		uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
804 		err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
805 		if (err) {
806 			irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
807 					IRDMA_OP_AH_DESTROY, false, NULL, ah);
808 			goto err_gid_l2;
809 		}
810 	}
811 
812 	return 0;
813 err_gid_l2:
814 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
815 
816 	return err;
817 }
818 #endif
819 
820 void
821 irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
822 {
823 	ether_addr_copy(dmac, attr->dmac);
824 }
825 
826 #if __FreeBSD_version < 1400026
827 struct ib_ah *
828 irdma_create_ah_stub(struct ib_pd *ibpd,
829 		     struct ib_ah_attr *attr,
830 		     struct ib_udata *udata)
831 #else
832 int
833 irdma_create_ah_stub(struct ib_ah *ib_ah,
834 		     struct ib_ah_attr *attr, u32 flags,
835 		     struct ib_udata *udata)
836 #endif
837 {
838 #if __FreeBSD_version >= 1400026
839 	return -ENOSYS;
840 #else
841 	return ERR_PTR(-ENOSYS);
842 #endif
843 }
844 
845 #if __FreeBSD_version >= 1400026
846 void
847 irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags)
848 {
849 	return;
850 }
851 #else
852 int
853 irdma_destroy_ah_stub(struct ib_ah *ibah)
854 {
855 	return -ENOSYS;
856 }
857 #endif
858 
859 #if __FreeBSD_version < 1400026
860 /**
861  * irdma_create_ah - create address handle
862  * @ibpd: ptr to pd
863  * @attr: address handle attributes
864  * @udata: user data
865  *
866  * returns a pointer to an address handle
867  */
868 struct ib_ah *
869 irdma_create_ah(struct ib_pd *ibpd,
870 		struct ib_ah_attr *attr,
871 		struct ib_udata *udata)
872 {
873 	struct irdma_pd *pd = to_iwpd(ibpd);
874 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
875 	struct irdma_ah *ah;
876 	union ib_gid sgid;
877 	struct ib_gid_attr sgid_attr;
878 	struct irdma_pci_f *rf = iwdev->rf;
879 	struct irdma_sc_ah *sc_ah;
880 	u32 ah_id = 0;
881 	struct irdma_ah_info *ah_info;
882 	struct irdma_create_ah_resp uresp = {};
883 	union irdma_sockaddr sgid_addr, dgid_addr;
884 	int err;
885 	u8 dmac[ETHER_ADDR_LEN];
886 	bool sleep = udata ? true : false;
887 
888 	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
889 		return ERR_PTR(-EINVAL);
890 
891 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
892 			       rf->max_ah, &ah_id, &rf->next_ah);
893 
894 	if (err)
895 		return ERR_PTR(err);
896 
897 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
898 	if (!ah) {
899 		irdma_free_rsrc(rf, rf->allocated_ahs, ah_id);
900 		return ERR_PTR(-ENOMEM);
901 	}
902 
903 	ah->pd = pd;
904 	sc_ah = &ah->sc_ah;
905 	sc_ah->ah_info.ah_idx = ah_id;
906 	sc_ah->ah_info.vsi = &iwdev->vsi;
907 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
908 	ah->sgid_index = attr->grh.sgid_index;
909 	memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
910 	rcu_read_lock();
911 	err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
912 				attr->grh.sgid_index, &sgid, &sgid_attr);
913 	rcu_read_unlock();
914 	if (err) {
915 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
916 			    "GID lookup at idx=%d with port=%d failed\n",
917 			    attr->grh.sgid_index, attr->port_num);
918 		err = -EINVAL;
919 		goto err_gid_l2;
920 	}
921 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
922 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
923 	ah->av.attrs = *attr;
924 	ah->av.net_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
925 
926 	if (sgid_attr.ndev)
927 		dev_put(sgid_attr.ndev);
928 
929 	ah_info = &sc_ah->ah_info;
930 	ah_info->ah_idx = ah_id;
931 	ah_info->pd_idx = pd->sc_pd.pd_id;
932 
933 	ether_addr_copy(ah_info->mac_addr, if_getlladdr(iwdev->netdev));
934 	if (attr->ah_flags & IB_AH_GRH) {
935 		ah_info->flow_label = attr->grh.flow_label;
936 		ah_info->hop_ttl = attr->grh.hop_limit;
937 		ah_info->tc_tos = attr->grh.traffic_class;
938 	}
939 
940 	if (udata)
941 		ib_resolve_eth_dmac(ibpd->device, attr);
942 	irdma_ether_copy(dmac, attr);
943 
944 	irdma_fill_ah_info(if_getvnet(iwdev->netdev), ah_info, &sgid_attr, &sgid_addr, &dgid_addr,
945 			   dmac, ah->av.net_type);
946 
947 	err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac);
948 	if (err)
949 		goto err_gid_l2;
950 
951 	err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
952 			      sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
953 	if (err) {
954 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "CQP-OP Create AH fail");
955 		goto err_gid_l2;
956 	}
957 
958 	err = irdma_create_ah_wait(rf, sc_ah, sleep);
959 	if (err) {
960 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
961 		goto err_gid_l2;
962 	}
963 
964 	if (udata) {
965 		uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
966 		err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
967 		if (err) {
968 			irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
969 					IRDMA_OP_AH_DESTROY, false, NULL, ah);
970 			goto err_gid_l2;
971 		}
972 	}
973 
974 	return &ah->ibah;
975 err_gid_l2:
976 	kfree(ah);
977 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
978 
979 	return ERR_PTR(err);
980 }
981 #endif
982 
983 /**
984  * irdma_free_qp_rsrc - free up memory resources for qp
985  * @iwqp: qp ptr (user or kernel)
986  */
987 void
988 irdma_free_qp_rsrc(struct irdma_qp *iwqp)
989 {
990 	struct irdma_device *iwdev = iwqp->iwdev;
991 	struct irdma_pci_f *rf = iwdev->rf;
992 	u32 qp_num = iwqp->ibqp.qp_num;
993 
994 	irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
995 	irdma_dealloc_push_page(rf, &iwqp->sc_qp);
996 	if (iwqp->sc_qp.vsi) {
997 		irdma_qp_rem_qos(&iwqp->sc_qp);
998 		iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
999 					   iwqp->sc_qp.user_pri);
1000 	}
1001 
1002 	if (qp_num > 2)
1003 		irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
1004 	irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
1005 	irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
1006 	kfree(iwqp->kqp.sig_trk_mem);
1007 	iwqp->kqp.sig_trk_mem = NULL;
1008 	kfree(iwqp->kqp.sq_wrid_mem);
1009 	kfree(iwqp->kqp.rq_wrid_mem);
1010 	kfree(iwqp->sg_list);
1011 	kfree(iwqp);
1012 }
1013 
1014 /**
1015  * irdma_create_qp - create qp
1016  * @ibpd: ptr of pd
1017  * @init_attr: attributes for qp
1018  * @udata: user data for create qp
1019  */
1020 struct ib_qp *
1021 irdma_create_qp(struct ib_pd *ibpd,
1022 		struct ib_qp_init_attr *init_attr,
1023 		struct ib_udata *udata)
1024 {
1025 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
1026 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
1027 	struct irdma_pd *iwpd = to_iwpd(ibpd);
1028 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
1029 	struct irdma_pci_f *rf = iwdev->rf;
1030 	struct irdma_qp *iwqp;
1031 	struct irdma_create_qp_resp uresp = {0};
1032 	u32 qp_num = 0;
1033 	int ret;
1034 	int err_code;
1035 	struct irdma_sc_qp *qp;
1036 	struct irdma_sc_dev *dev = &rf->sc_dev;
1037 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
1038 	struct irdma_qp_init_info init_info = {{0}};
1039 	struct irdma_qp_host_ctx_info *ctx_info;
1040 	unsigned long flags;
1041 
1042 	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
1043 	if (err_code)
1044 		return ERR_PTR(err_code);
1045 
1046 	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
1047 		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
1048 		return ERR_PTR(-EINVAL);
1049 
1050 	init_info.vsi = &iwdev->vsi;
1051 	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
1052 	init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
1053 	init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
1054 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
1055 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
1056 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
1057 
1058 	iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
1059 	if (!iwqp)
1060 		return ERR_PTR(-ENOMEM);
1061 
1062 	iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, sizeof(*iwqp->sg_list),
1063 				GFP_KERNEL);
1064 	if (!iwqp->sg_list) {
1065 		kfree(iwqp);
1066 		return ERR_PTR(-ENOMEM);
1067 	}
1068 
1069 	qp = &iwqp->sc_qp;
1070 	qp->qp_uk.back_qp = iwqp;
1071 	qp->qp_uk.lock = &iwqp->lock;
1072 	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
1073 
1074 	iwqp->iwdev = iwdev;
1075 	iwqp->q2_ctx_mem.size = IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE;
1076 	iwqp->q2_ctx_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->q2_ctx_mem,
1077 						     iwqp->q2_ctx_mem.size,
1078 						     256);
1079 	if (!iwqp->q2_ctx_mem.va) {
1080 		kfree(iwqp->sg_list);
1081 		kfree(iwqp);
1082 		return ERR_PTR(-ENOMEM);
1083 	}
1084 
1085 	init_info.q2 = iwqp->q2_ctx_mem.va;
1086 	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
1087 	init_info.host_ctx = (__le64 *) (init_info.q2 + IRDMA_Q2_BUF_SIZE);
1088 	init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
1089 
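	/*
	 * The GSI QP always uses QP id 1; all other QPs take an id from
	 * the QP resource bitmap.
	 */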
1090 	if (init_attr->qp_type == IB_QPT_GSI)
1091 		qp_num = 1;
1092 	else
1093 		err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
1094 					    &qp_num, &rf->next_qp);
1095 	if (err_code)
1096 		goto error;
1097 
1098 	iwqp->iwpd = iwpd;
1099 	iwqp->ibqp.qp_num = qp_num;
1100 	qp = &iwqp->sc_qp;
1101 	iwqp->iwscq = to_iwcq(init_attr->send_cq);
1102 	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
1103 	iwqp->host_ctx.va = init_info.host_ctx;
1104 	iwqp->host_ctx.pa = init_info.host_ctx_pa;
1105 	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
1106 
1107 	init_info.pd = &iwpd->sc_pd;
1108 	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
1109 	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
1110 		init_info.qp_uk_init_info.first_sq_wq = 1;
1111 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
1112 	init_waitqueue_head(&iwqp->waitq);
1113 	init_waitqueue_head(&iwqp->mod_qp_waitq);
1114 
1115 	spin_lock_init(&iwqp->dwork_flush_lock);
1116 
1117 	if (udata) {
1118 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
1119 		err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr);
1120 	} else {
1121 		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
1122 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
1123 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
1124 	}
1125 
1126 	if (err_code) {
1127 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "setup qp failed\n");
1128 		goto error;
1129 	}
1130 
1131 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1132 		if (init_attr->qp_type == IB_QPT_RC) {
1133 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
1134 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
1135 			    IRDMA_WRITE_WITH_IMM |
1136 			    IRDMA_ROCE;
1137 		} else {
1138 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
1139 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
1140 			    IRDMA_ROCE;
1141 		}
1142 	} else {
1143 		init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
1144 		init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
1145 	}
1146 
1147 	ret = irdma_sc_qp_init(qp, &init_info);
1148 	if (ret) {
1149 		err_code = -EPROTO;
1150 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "qp_init fail\n");
1151 		goto error;
1152 	}
1153 
1154 	ctx_info = &iwqp->ctx_info;
1155 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1156 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1157 
1158 	if (rdma_protocol_roce(&iwdev->ibdev, 1))
1159 		irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
1160 	else
1161 		irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
1162 
1163 	err_code = irdma_cqp_create_qp_cmd(iwqp);
1164 	if (err_code)
1165 		goto error;
1166 
1167 	atomic_set(&iwqp->refcnt, 1);
1168 	spin_lock_init(&iwqp->lock);
1169 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
1170 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
1171 	rf->qp_table[qp_num] = iwqp;
1172 
1173 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1174 		if (dev->ws_add(&iwdev->vsi, 0)) {
1175 			irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
1176 			err_code = -EINVAL;
1177 			goto error;
1178 		}
1179 
1180 		irdma_qp_add_qos(&iwqp->sc_qp);
1181 		spin_lock_irqsave(&iwpd->udqp_list_lock, flags);
1182 		if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD)
1183 			list_add_tail(&iwqp->ud_list_elem, &iwpd->udqp_list);
1184 		spin_unlock_irqrestore(&iwpd->udqp_list_lock, flags);
1185 	}
1186 
1187 	if (udata) {
1188 		/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
1189 		if (udata->outlen < sizeof(uresp)) {
1190 			uresp.lsmm = 1;
1191 			uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
1192 		} else {
1193 			if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
1194 				uresp.lsmm = 1;
1195 		}
1196 		uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
1197 		uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
1198 		uresp.qp_id = qp_num;
1199 		uresp.qp_caps = qp->qp_uk.qp_caps;
1200 
1201 		err_code = ib_copy_to_udata(udata, &uresp,
1202 					    min(sizeof(uresp), udata->outlen));
1203 		if (err_code) {
1204 			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
1205 			kc_irdma_destroy_qp(&iwqp->ibqp, udata);
1206 			return ERR_PTR(err_code);
1207 		}
1208 	}
1209 
1210 	init_completion(&iwqp->free_qp);
1211 	return &iwqp->ibqp;
1212 
1213 error:
1214 	irdma_free_qp_rsrc(iwqp);
1215 
1216 	return ERR_PTR(err_code);
1217 }
1218 
1219 /**
1220  * irdma_destroy_qp - destroy qp
1221  * @ibqp: qp's ib pointer, also used to get to the device's qp address
1222  * @udata: user data
1223  */
1224 #if __FreeBSD_version >= 1400026
1225 int
1226 irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1227 #else
1228 int
1229 irdma_destroy_qp(struct ib_qp *ibqp)
1230 #endif
1231 {
1232 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1233 	struct irdma_device *iwdev = iwqp->iwdev;
1234 	unsigned long flags;
1235 
1236 	if (iwqp->sc_qp.qp_uk.destroy_pending)
1237 		goto free_rsrc;
1238 	iwqp->sc_qp.qp_uk.destroy_pending = true;
1239 
1240 	spin_lock_irqsave(&iwqp->iwpd->udqp_list_lock, flags);
1241 	if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD)
1242 		list_del(&iwqp->ud_list_elem);
1243 	spin_unlock_irqrestore(&iwqp->iwpd->udqp_list_lock, flags);
1244 
1245 	if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE)
1246 		irdma_modify_qp_to_err(&iwqp->sc_qp);
1247 
1248 	if (!iwqp->user_mode) {
1249 		if (iwqp->iwscq) {
1250 			irdma_clean_cqes(iwqp, iwqp->iwscq);
1251 			if (iwqp->iwrcq != iwqp->iwscq)
1252 				irdma_clean_cqes(iwqp, iwqp->iwrcq);
1253 		}
1254 	}
1255 	irdma_qp_rem_ref(&iwqp->ibqp);
1256 	wait_for_completion(&iwqp->free_qp);
1257 	irdma_free_lsmm_rsrc(iwqp);
1258 	if (!iwdev->rf->reset && irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
1259 		return (iwdev->rf->rdma_ver <= IRDMA_GEN_2 && !iwqp->user_mode) ? 0 : -ENOTRECOVERABLE;
1260 free_rsrc:
1261 	irdma_remove_push_mmap_entries(iwqp);
1262 	irdma_free_qp_rsrc(iwqp);
1263 
1264 	return 0;
1265 }
1266 
1267 /**
1268  * irdma_create_cq - create cq
1269  * @ibcq: CQ allocated
1270  * @attr: attributes for cq
1271  * @udata: user data
1272  */
1273 #if __FreeBSD_version >= 1400026
1274 int
1275 irdma_create_cq(struct ib_cq *ibcq,
1276 		const struct ib_cq_init_attr *attr,
1277 		struct ib_udata *udata)
1278 #else
1279 struct ib_cq *
1280 irdma_create_cq(struct ib_device *ibdev,
1281 		const struct ib_cq_init_attr *attr,
1282 		struct ib_ucontext *context,
1283 		struct ib_udata *udata)
1284 #endif
1285 {
1286 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
1287 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
1288 #if __FreeBSD_version >= 1400026
1289 	struct ib_device *ibdev = ibcq->device;
1290 #endif
1291 	struct irdma_device *iwdev = to_iwdev(ibdev);
1292 	struct irdma_pci_f *rf = iwdev->rf;
1293 #if __FreeBSD_version >= 1400026
1294 	struct irdma_cq *iwcq = to_iwcq(ibcq);
1295 #else
1296 	struct irdma_cq *iwcq;
1297 #endif
1298 	u32 cq_num = 0;
1299 	struct irdma_sc_cq *cq;
1300 	struct irdma_sc_dev *dev = &rf->sc_dev;
1301 	struct irdma_cq_init_info info = {0};
1302 	int status;
1303 	struct irdma_cqp_request *cqp_request;
1304 	struct cqp_cmds_info *cqp_info;
1305 	struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1306 	unsigned long flags;
1307 	int err_code;
1308 	int entries = attr->cqe;
1309 	bool cqe_64byte_ena;
1310 
1311 #if __FreeBSD_version >= 1400026
1312 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1313 	if (err_code)
1314 		return err_code;
1315 
1316 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
1317 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
1318 		return -EINVAL;
1319 #else
1320 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1321 	if (err_code)
1322 		return ERR_PTR(err_code);
1323 
1324 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
1325 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
1326 		return ERR_PTR(-EINVAL);
1327 
1328 	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
1329 	if (!iwcq)
1330 		return ERR_PTR(-ENOMEM);
1331 #endif
1332 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
1333 				    &rf->next_cq);
1334 	if (err_code)
1335 #if __FreeBSD_version >= 1400026
1336 		return err_code;
1337 #else
1338 		goto error;
1339 #endif
1340 	cq = &iwcq->sc_cq;
1341 	cq->back_cq = iwcq;
1342 	atomic_set(&iwcq->refcnt, 1);
1343 	spin_lock_init(&iwcq->lock);
1344 	INIT_LIST_HEAD(&iwcq->resize_list);
1345 	INIT_LIST_HEAD(&iwcq->cmpl_generated);
1346 	info.dev = dev;
1347 	ukinfo->cq_size = max(entries, 4);
1348 	ukinfo->cq_id = cq_num;
1349 	cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false;
1350 	ukinfo->avoid_mem_cflct = cqe_64byte_ena;
1351 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1352 	atomic_set(&iwcq->armed, 0);
1353 	if (attr->comp_vector < rf->ceqs_count)
1354 		info.ceq_id = attr->comp_vector;
1355 	info.ceq_id_valid = true;
1356 	info.ceqe_mask = 1;
1357 	info.type = IRDMA_CQ_TYPE_IWARP;
1358 	info.vsi = &iwdev->vsi;
1359 
1360 	if (udata) {
1361 		struct irdma_ucontext *ucontext;
1362 		struct irdma_create_cq_req req = {0};
1363 		struct irdma_cq_mr *cqmr;
1364 		struct irdma_pbl *iwpbl;
1365 		struct irdma_pbl *iwpbl_shadow;
1366 		struct irdma_cq_mr *cqmr_shadow;
1367 
1368 		iwcq->user_mode = true;
1369 #if __FreeBSD_version >= 1400026
1370 		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1371 #else
1372 		ucontext = to_ucontext(context);
1373 #endif
1374 
1375 		if (ib_copy_from_udata(&req, udata,
1376 				       min(sizeof(req), udata->inlen))) {
1377 			err_code = -EFAULT;
1378 			goto cq_free_rsrc;
1379 		}
1380 
1381 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1382 		iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
1383 				      &ucontext->cq_reg_mem_list);
1384 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1385 		if (!iwpbl) {
1386 			err_code = -EPROTO;
1387 			goto cq_free_rsrc;
1388 		}
1389 		iwcq->iwpbl = iwpbl;
1390 		iwcq->cq_mem_size = 0;
1391 		cqmr = &iwpbl->cq_mr;
1392 
1393 		if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1394 		    IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
1395 			spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1396 			iwpbl_shadow = irdma_get_pbl((unsigned long)req.user_shadow_area,
1397 						     &ucontext->cq_reg_mem_list);
1398 			spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1399 
1400 			if (!iwpbl_shadow) {
1401 				err_code = -EPROTO;
1402 				goto cq_free_rsrc;
1403 			}
1404 			iwcq->iwpbl_shadow = iwpbl_shadow;
1405 			cqmr_shadow = &iwpbl_shadow->cq_mr;
1406 			info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
1407 			cqmr->split = true;
1408 		} else {
1409 			info.shadow_area_pa = cqmr->shadow;
1410 		}
1411 		if (iwpbl->pbl_allocated) {
1412 			info.virtual_map = true;
1413 			info.pbl_chunk_size = 1;
1414 			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
1415 		} else {
1416 			info.cq_base_pa = cqmr->cq_pbl.addr;
1417 		}
1418 	} else {
1419 		/* Kmode allocations */
1420 		int rsize;
1421 
1422 		if (entries < 1 || entries > rf->max_cqe) {
1423 			err_code = -EINVAL;
1424 			goto cq_free_rsrc;
1425 		}
1426 
1427 		entries++;
1428 		if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1429 			entries *= 2;
1430 		ukinfo->cq_size = entries;
1431 
1432 		if (cqe_64byte_ena)
1433 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
1434 		else
1435 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
1436 		iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE);
1437 		iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
1438 						       iwcq->kmem.size, IRDMA_HW_PAGE_SIZE);
1439 		if (!iwcq->kmem.va) {
1440 			err_code = -ENOMEM;
1441 			goto cq_free_rsrc;
1442 		}
1443 
1444 		iwcq->kmem_shadow.size = IRDMA_SHADOW_AREA_SIZE << 3;
1445 		iwcq->kmem_shadow.va = irdma_allocate_dma_mem(dev->hw,
1446 							      &iwcq->kmem_shadow,
1447 							      iwcq->kmem_shadow.size,
1448 							      64);
1449 
1450 		if (!iwcq->kmem_shadow.va) {
1451 			err_code = -ENOMEM;
1452 			goto cq_kmem_free;
1453 		}
1454 		info.shadow_area_pa = iwcq->kmem_shadow.pa;
1455 		ukinfo->shadow_area = iwcq->kmem_shadow.va;
1456 		ukinfo->cq_base = iwcq->kmem.va;
1457 		info.cq_base_pa = iwcq->kmem.pa;
1458 	}
1459 
1460 	info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
1461 					 (u32)IRDMA_MAX_CQ_READ_THRESH);
1462 	if (irdma_sc_cq_init(cq, &info)) {
1463 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "init cq fail\n");
1464 		err_code = -EPROTO;
1465 		goto cq_kmem_free;
1466 	}
1467 
1468 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1469 	if (!cqp_request) {
1470 		err_code = -ENOMEM;
1471 		goto cq_kmem_free;
1472 	}
1473 	cqp_info = &cqp_request->info;
1474 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
1475 	cqp_info->post_sq = 1;
1476 	cqp_info->in.u.cq_create.cq = cq;
1477 	cqp_info->in.u.cq_create.check_overflow = true;
1478 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1479 	status = irdma_handle_cqp_op(rf, cqp_request);
1480 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1481 	if (status) {
1482 		err_code = -ENOMEM;
1483 		goto cq_kmem_free;
1484 	}
1485 
1486 	if (udata) {
1487 		struct irdma_create_cq_resp resp = {0};
1488 
1489 		resp.cq_id = info.cq_uk_init_info.cq_id;
1490 		resp.cq_size = info.cq_uk_init_info.cq_size;
1491 		if (ib_copy_to_udata(udata, &resp,
1492 				     min(sizeof(resp), udata->outlen))) {
1493 			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy to user data\n");
1494 			err_code = -EPROTO;
1495 			goto cq_destroy;
1496 		}
1497 	}
1498 
1499 	rf->cq_table[cq_num] = iwcq;
1500 	init_completion(&iwcq->free_cq);
1501 
1502 #if __FreeBSD_version >= 1400026
1503 	return 0;
1504 #else
1505 	return &iwcq->ibcq;
1506 #endif
1507 cq_destroy:
1508 	irdma_cq_wq_destroy(rf, cq);
1509 cq_kmem_free:
1510 	if (!iwcq->user_mode) {
1511 		irdma_free_dma_mem(dev->hw, &iwcq->kmem);
1512 		irdma_free_dma_mem(dev->hw, &iwcq->kmem_shadow);
1513 	}
1514 cq_free_rsrc:
1515 	irdma_free_rsrc(rf, rf->allocated_cqs, cq_num);
1516 #if __FreeBSD_version >= 1400026
1517 	return err_code;
1518 #else
1519 error:
1520 	kfree(iwcq);
1521 	return ERR_PTR(err_code);
1522 #endif
1523 }
1524 
1525 /**
1526  * irdma_copy_user_pgaddrs - copy user page address to pble's os locally
1527  * @iwmr: iwmr for IB's user page addresses
1528  * @pbl: ple pointer to save 1 level or 0 level pble
1529  * @level: indicated level 0, 1 or 2
1530  */
1531 
1532 void
1533 irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
1534 			enum irdma_pble_level level)
1535 {
1536 	struct ib_umem *region = iwmr->region;
1537 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1538 	int chunk_pages, entry, i;
1539 	struct scatterlist *sg;
1540 	u64 pg_addr = 0;
1541 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1542 	struct irdma_pble_info *pinfo;
1543 	u32 idx = 0;
1544 	u32 pbl_cnt = 0;
1545 
1546 	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
1547 	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
1548 		chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size);
1549 		if (iwmr->type == IRDMA_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page)
1550 			iwpbl->qp_mr.sq_page = sg_page(sg);
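		/*
		 * Record only page-aligned DMA addresses: the first entry is
		 * masked down to the start of its page and any address not
		 * aligned to the MR page size is skipped.
		 */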
1551 		for (i = 0; i < chunk_pages; i++) {
1552 			pg_addr = sg_dma_address(sg) + (i * iwmr->page_size);
1553 			if ((entry + i) == 0)
1554 				*pbl = pg_addr & iwmr->page_msk;
1555 			else if (!(pg_addr & ~iwmr->page_msk))
1556 				*pbl = pg_addr;
1557 			else
1558 				continue;
1559 			if (++pbl_cnt == palloc->total_cnt)
1560 				break;
1561 			pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
1562 		}
1563 	}
1564 }
1565 
1566 /**
1567  * irdma_destroy_ah - Destroy address handle
1568  * @ibah: pointer to address handle
1569  * @ah_flags: destroy flags
1570  */
1571 
1572 #if __FreeBSD_version >= 1400026
1573 void
1574 irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
1575 {
1576 	struct irdma_device *iwdev = to_iwdev(ibah->device);
1577 	struct irdma_ah *ah = to_iwah(ibah);
1578 
1579 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
1580 			false, NULL, ah);
1581 
1582 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
1583 			ah->sc_ah.ah_info.ah_idx);
1584 }
1585 #endif
1586 
1587 #if __FreeBSD_version < 1400026
1588 int
1589 irdma_destroy_ah(struct ib_ah *ibah)
1590 {
1591 	struct irdma_device *iwdev = to_iwdev(ibah->device);
1592 	struct irdma_ah *ah = to_iwah(ibah);
1593 
1594 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
1595 			false, NULL, ah);
1596 
1597 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
1598 			ah->sc_ah.ah_info.ah_idx);
1599 
1600 	kfree(ah);
1601 	return 0;
1602 }
1603 #endif
1604 
1605 #if __FreeBSD_version >= 1400026
1606 int
1607 irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
1608 #else
1609 int
1610 irdma_dereg_mr(struct ib_mr *ib_mr)
1611 #endif
1612 {
1613 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
1614 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
1615 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1616 	int ret;
1617 
1618 	if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
1619 		if (iwmr->region) {
1620 			struct irdma_ucontext *ucontext;
1621 #if __FreeBSD_version >= 1400026
1622 
1623 			ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1624 
1625 #else
1626 			struct ib_pd *ibpd = ib_mr->pd;
1627 
1628 			ucontext = to_ucontext(ibpd->uobject->context);
1629 #endif
1630 			irdma_del_memlist(iwmr, ucontext);
1631 		}
1632 		goto done;
1633 	}
1634 
1635 	ret = irdma_hwdereg_mr(ib_mr);
1636 	if (ret)
1637 		return ret;
1638 
1639 	irdma_free_stag(iwdev, iwmr->stag);
1640 done:
1641 	if (iwpbl->pbl_allocated)
1642 		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
1643 
1644 	if (iwmr->region)
1645 		ib_umem_release(iwmr->region);
1646 
1647 	kfree(iwmr);
1648 
1649 	return 0;
1650 }
1651 
1652 /*
1653  * irdma_rereg_user_mr - Re-register a user memory region. @ib_mr: ib mr to access iwarp mr pointer. @flags: bit mask
1654  * indicating which of the MR's attributes are modified. @start: virtual start address. @len: length of mr. @virt: virtual
1655  * address. @new_access: bit mask of new access flags. @new_pd: ptr of pd. @udata: user data.
1656  */
1657 int
1658 irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
1659 		    u64 virt, int new_access, struct ib_pd *new_pd,
1660 		    struct ib_udata *udata)
1661 {
1662 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
1663 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
1664 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1665 	int ret;
1666 
1667 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
1668 		return -EINVAL;
1669 
1670 	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
1671 		return -EOPNOTSUPP;
1672 
1673 	ret = irdma_hwdereg_mr(ib_mr);
1674 	if (ret)
1675 		return ret;
1676 
1677 	if (flags & IB_MR_REREG_ACCESS)
1678 		iwmr->access = new_access;
1679 
1680 	if (flags & IB_MR_REREG_PD) {
1681 		iwmr->ibmr.pd = new_pd;
1682 		iwmr->ibmr.device = new_pd->device;
1683 	}
1684 
1685 	if (flags & IB_MR_REREG_TRANS) {
1686 		if (iwpbl->pbl_allocated) {
1687 			irdma_free_pble(iwdev->rf->pble_rsrc,
1688 					&iwpbl->pble_alloc);
1689 			iwpbl->pbl_allocated = false;
1690 		}
1691 		if (iwmr->region) {
1692 			ib_umem_release(iwmr->region);
1693 			iwmr->region = NULL;
1694 		}
1695 
1696 		ib_mr = irdma_rereg_mr_trans(iwmr, start, len, virt, udata);
1697 		if (IS_ERR(ib_mr))
1698 			return PTR_ERR(ib_mr);
1699 
1700 	} else {
1701 		ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
1702 		if (ret)
1703 			return ret;
1704 	}
1705 
1706 	return 0;
1707 }
1708 
1709 int
1710 kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
1711 			  u16 *vlan_id)
1712 {
1713 	int ret;
1714 	union ib_gid sgid;
1715 	struct ib_gid_attr sgid_attr;
1716 	struct irdma_av *av = &iwqp->roce_ah.av;
1717 
1718 	ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num,
1719 				attr->ah_attr.grh.sgid_index, &sgid,
1720 				&sgid_attr);
1721 	if (ret)
1722 		return ret;
1723 
1724 	if (sgid_attr.ndev) {
1725 		*vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
1726 		ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, if_getlladdr(sgid_attr.ndev));
1727 	}
1728 
1729 	av->net_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
1730 	rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
1731 	dev_put(sgid_attr.ndev);
1732 	iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri;
1733 
1734 	return 0;
1735 }
1736 
1737 #if __FreeBSD_version >= 1400026
1738 /**
1739  * irdma_destroy_cq - destroy cq
1740  * @ib_cq: cq pointer
1741  * @udata: user data
1742  */
1743 void
1744 irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1745 {
1746 	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1747 	struct irdma_cq *iwcq = to_iwcq(ib_cq);
1748 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1749 	struct irdma_sc_dev *dev = cq->dev;
1750 	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1751 	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1752 	unsigned long flags;
1753 
1754 	spin_lock_irqsave(&iwcq->lock, flags);
1755 	if (!list_empty(&iwcq->cmpl_generated))
1756 		irdma_remove_cmpls_list(iwcq);
1757 	if (!list_empty(&iwcq->resize_list))
1758 		irdma_process_resize_list(iwcq, iwdev, NULL);
1759 	spin_unlock_irqrestore(&iwcq->lock, flags);
1760 
1761 	irdma_cq_rem_ref(ib_cq);
1762 	wait_for_completion(&iwcq->free_cq);
1763 
1764 	irdma_cq_wq_destroy(iwdev->rf, cq);
1765 
1766 	spin_lock_irqsave(&iwceq->ce_lock, flags);
1767 	irdma_sc_cleanup_ceqes(cq, ceq);
1768 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1769 	irdma_cq_free_rsrc(iwdev->rf, iwcq);
1770 }
1771 
1772 #endif
1773 #if __FreeBSD_version < 1400026
1774 /**
1775  * irdma_destroy_cq - destroy cq
1776  * @ib_cq: cq pointer
1777  */
1778 int
1779 irdma_destroy_cq(struct ib_cq *ib_cq)
1780 {
1781 	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1782 	struct irdma_cq *iwcq = to_iwcq(ib_cq);
1783 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1784 	struct irdma_sc_dev *dev = cq->dev;
1785 	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1786 	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1787 	unsigned long flags;
1788 
1789 	spin_lock_irqsave(&iwcq->lock, flags);
1790 	if (!list_empty(&iwcq->cmpl_generated))
1791 		irdma_remove_cmpls_list(iwcq);
1792 	if (!list_empty(&iwcq->resize_list))
1793 		irdma_process_resize_list(iwcq, iwdev, NULL);
1794 	spin_unlock_irqrestore(&iwcq->lock, flags);
1795 
1796 	irdma_cq_rem_ref(ib_cq);
1797 	wait_for_completion(&iwcq->free_cq);
1798 
1799 	irdma_cq_wq_destroy(iwdev->rf, cq);
1800 
1801 	spin_lock_irqsave(&iwceq->ce_lock, flags);
1802 	irdma_sc_cleanup_ceqes(cq, ceq);
1803 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1804 
1805 	irdma_cq_free_rsrc(iwdev->rf, iwcq);
1806 	kfree(iwcq);
1807 
1808 	return 0;
1809 }
1810 
1811 #endif
1812 /**
1813  * irdma_alloc_mw - Allocate memory window
1814  * @pd: Protection domain
1815  * @type: Window type
1816  * @udata: user data pointer
1817  */
1818 struct ib_mw *
1819 irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1820 	       struct ib_udata *udata)
1821 {
1822 	struct irdma_device *iwdev = to_iwdev(pd->device);
1823 	struct irdma_mr *iwmr;
1824 	int err_code;
1825 	u32 stag;
1826 
1827 	if (type != IB_MW_TYPE_1 && type != IB_MW_TYPE_2)
1828 		return ERR_PTR(-EINVAL);
1829 
1830 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1831 	if (!iwmr)
1832 		return ERR_PTR(-ENOMEM);
1833 
1834 	stag = irdma_create_stag(iwdev);
1835 	if (!stag) {
1836 		kfree(iwmr);
1837 		return ERR_PTR(-ENOMEM);
1838 	}
1839 
1840 	iwmr->stag = stag;
1841 	iwmr->ibmw.rkey = stag;
1842 	iwmr->ibmw.pd = pd;
1843 	iwmr->ibmw.type = type;
1844 	iwmr->ibmw.device = pd->device;
1845 
1846 	err_code = irdma_hw_alloc_mw(iwdev, iwmr);
1847 	if (err_code) {
1848 		irdma_free_stag(iwdev, stag);
1849 		kfree(iwmr);
1850 		return ERR_PTR(err_code);
1851 	}
1852 
1853 	return &iwmr->ibmw;
1854 }
1855 
1856 /**
1857  * kc_set_loc_seq_num_mss - Set local seq number and mss
1858  * @cm_node: cm node info
1859  */
1860 void
1861 kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node)
1862 {
1863 	struct timespec ts;
1864 
1865 	getnanotime(&ts);
1866 	cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
1867 	if (cm_node->iwdev->vsi.mtu > 1500 &&
1868 	    2 * cm_node->iwdev->vsi.mtu > cm_node->iwdev->rcv_wnd)
1869 		cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
1870 		    (1500 - IRDMA_MTU_TO_MSS_IPV4) :
1871 		    (1500 - IRDMA_MTU_TO_MSS_IPV6);
1872 	else
1873 		cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
1874 		    (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4) :
1875 		    (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6);
1876 }
1877 
1878 #if __FreeBSD_version < 1400026
1879 struct irdma_vma_data {
1880 	struct list_head list;
1881 	struct vm_area_struct *vma;
1882 	struct mutex *vma_list_mutex;	/* protect the vma_list */
1883 };
1884 
1885 /**
1886  * irdma_vma_open - Clear vm_ops when a user VMA is opened
1887  * @vma: User VMA
1888  */
1889 static void
1890 irdma_vma_open(struct vm_area_struct *vma)
1891 {
1892 	vma->vm_ops = NULL;
1893 }
1894 
1895 /**
1896  * irdma_vma_close - Remove vma data from vma list
1897  * @vma: User VMA
1898  */
1899 static void
1900 irdma_vma_close(struct vm_area_struct *vma)
1901 {
1902 	struct irdma_vma_data *vma_data;
1903 
1904 	vma_data = vma->vm_private_data;
1905 	vma->vm_private_data = NULL;
1906 	vma_data->vma = NULL;
1907 	mutex_lock(vma_data->vma_list_mutex);
1908 	list_del(&vma_data->list);
1909 	mutex_unlock(vma_data->vma_list_mutex);
1910 	kfree(vma_data);
1911 }
1912 
1913 static const struct vm_operations_struct irdma_vm_ops = {
1914 	.open = irdma_vma_open,
1915 	.close = irdma_vma_close
1916 };
1917 
1918 /**
1919  * irdma_set_vma_data - Save vma data in context list
1920  * @vma: User VMA
1921  * @context: ib user context
1922  */
1923 static int
1924 irdma_set_vma_data(struct vm_area_struct *vma,
1925 		   struct irdma_ucontext *context)
1926 {
1927 	struct list_head *vma_head = &context->vma_list;
1928 	struct irdma_vma_data *vma_entry;
1929 
1930 	vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL);
1931 	if (!vma_entry)
1932 		return -ENOMEM;
1933 
1934 	vma->vm_private_data = vma_entry;
1935 	vma->vm_ops = &irdma_vm_ops;
1936 
1937 	vma_entry->vma = vma;
1938 	vma_entry->vma_list_mutex = &context->vma_list_mutex;
1939 
1940 	mutex_lock(&context->vma_list_mutex);
1941 	list_add(&vma_entry->list, vma_head);
1942 	mutex_unlock(&context->vma_list_mutex);
1943 
1944 	return 0;
1945 }
1946 
1947 /**
1948  * irdma_disassociate_ucontext - Disassociate user context
1949  * @context: ib user context
1950  */
1951 void
1952 irdma_disassociate_ucontext(struct ib_ucontext *context)
1953 {
1954 	struct irdma_ucontext *ucontext = to_ucontext(context);
1955 
1956 	struct irdma_vma_data *vma_data, *n;
1957 	struct vm_area_struct *vma;
1958 
1959 	mutex_lock(&ucontext->vma_list_mutex);
1960 	list_for_each_entry_safe(vma_data, n, &ucontext->vma_list, list) {
1961 		vma = vma_data->vma;
1962 		zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
1963 
1964 		vma->vm_ops = NULL;
1965 		list_del(&vma_data->list);
1966 		kfree(vma_data);
1967 	}
1968 	mutex_unlock(&ucontext->vma_list_mutex);
1969 }
1970 
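/**
 * rdma_user_mmap_io - kcompat helper to map device I/O pages to user space
 * @context: ib user context
 * @vma: user VMA
 * @pfn: page frame number to map
 * @size: length of the mapping
 * @prot: page protection bits
 *
 * Remaps the PFN range into the user VMA, then tracks the VMA on the
 * ucontext's list so it can be zapped in irdma_disassociate_ucontext().
 */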
1971 int
1972 rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma,
1973 		  unsigned long pfn, unsigned long size, pgprot_t prot)
1974 {
1975 	if (io_remap_pfn_range(vma,
1976 			       vma->vm_start,
1977 			       pfn,
1978 			       size,
1979 			       prot))
1980 		return -EAGAIN;
1981 
1982 	return irdma_set_vma_data(vma, to_ucontext(context));
1983 }
1984 #else
1985 /**
1986  * irdma_disassociate_ucontext - Disassociate user context
1987  * @context: ib user context
1988  */
1989 void
1990 irdma_disassociate_ucontext(struct ib_ucontext *context)
1991 {
1992 }
1993 #endif
1994 
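/**
 * ib_device_get_by_netdev - find the ib device backed by a given netdev
 * @netdev: network interface to look up
 * @driver_id: driver id (unused)
 *
 * Walks the irdma handler list under irdma_handler_lock and returns the
 * matching ib_device, or NULL if no irdma device owns the netdev.
 */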
1995 struct ib_device *
1996 ib_device_get_by_netdev(if_t netdev, int driver_id)
1997 {
1998 	struct irdma_device *iwdev;
1999 	struct irdma_handler *hdl;
2000 	unsigned long flags;
2001 
2002 	spin_lock_irqsave(&irdma_handler_lock, flags);
2003 	list_for_each_entry(hdl, &irdma_handlers, list) {
2004 		iwdev = hdl->iwdev;
2005 		if (netdev == iwdev->netdev) {
2006 			spin_unlock_irqrestore(&irdma_handler_lock,
2007 					       flags);
2008 			return &iwdev->ibdev;
2009 		}
2010 	}
2011 	spin_unlock_irqrestore(&irdma_handler_lock, flags);
2012 
2013 	return NULL;
2014 }
2015 
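/**
 * ib_unregister_device_put - kcompat wrapper around ib_unregister_device
 * @device: ib device to unregister
 */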
2016 void
2017 ib_unregister_device_put(struct ib_device *device)
2018 {
2019 	ib_unregister_device(device);
2020 }
2021 
2022 /**
2023  * irdma_query_gid_roce - Query port GID for RoCE
2024  * @ibdev: device pointer from stack
2025  * @port: port number
2026  * @index: Entry index
2027  * @gid: Global ID
2028  */
2029 int
2030 irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
2031 		     union ib_gid *gid)
2032 {
2033 	int ret;
2034 
2035 	ret = rdma_query_gid(ibdev, port, index, gid);
2036 	if (ret == -EAGAIN) {
2037 		memcpy(gid, &zgid, sizeof(*gid));
2038 		return 0;
2039 	}
2040 
2041 	return ret;
2042 }
2043 
2044 /**
2045  * irdma_modify_port - modify port attributes
2046  * @ibdev: device pointer from stack
2047  * @port: port number to modify
2048  * @mask: Property mask
2049  * @props: port attributes to modify
2050  */
2051 int
2052 irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
2053 		  struct ib_port_modify *props)
2054 {
2055 	if (port > 1)
2056 		return -EINVAL;
2057 
2058 	return 0;
2059 }
2060 
2061 /**
2062  * irdma_query_pkey - Query partition key
2063  * @ibdev: device pointer from stack
2064  * @port: port number
2065  * @index: index of pkey
2066  * @pkey: pointer to store the pkey
2067  */
2068 int
2069 irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
2070 		 u16 *pkey)
2071 {
2072 	if (index >= IRDMA_PKEY_TBL_SZ)
2073 		return -EINVAL;
2074 
2075 	*pkey = IRDMA_DEFAULT_PKEY;
2076 	return 0;
2077 }
2078 
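/**
 * irdma_roce_port_immutable - get immutable port attributes for RoCEv2
 * @ibdev: device pointer from stack
 * @port_num: port number
 * @immutable: immutable port attributes to fill in
 */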
2079 int
2080 irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
2081 			  struct ib_port_immutable *immutable)
2082 {
2083 	struct ib_port_attr attr;
2084 	int err;
2085 
2086 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2087 	err = ib_query_port(ibdev, port_num, &attr);
2088 	if (err)
2089 		return err;
2090 
2091 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2092 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
2093 	immutable->gid_tbl_len = attr.gid_tbl_len;
2094 
2095 	return 0;
2096 }
2097 
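/**
 * irdma_iw_port_immutable - get immutable port attributes for iWARP
 * @ibdev: device pointer from stack
 * @port_num: port number
 * @immutable: immutable port attributes to fill in
 */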
2098 int
2099 irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
2100 			struct ib_port_immutable *immutable)
2101 {
2102 	struct ib_port_attr attr;
2103 	int err;
2104 
2105 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
2106 	err = ib_query_port(ibdev, port_num, &attr);
2107 	if (err)
2108 		return err;
2109 	immutable->gid_tbl_len = 1;
2110 
2111 	return 0;
2112 }
2113 
2114 /**
2115  * irdma_query_port - get port attributes
2116  * @ibdev: device pointer from stack
2117  * @port: port number for query
2118  * @props: returning port attributes
2119  */
2120 int
2121 irdma_query_port(struct ib_device *ibdev, u8 port,
2122 		 struct ib_port_attr *props)
2123 {
2124 	struct irdma_device *iwdev = to_iwdev(ibdev);
2125 	if_t netdev = iwdev->netdev;
2126 
2127 	/* No need to zero out props here; done by the caller. */
2128 
2129 	props->max_mtu = IB_MTU_4096;
2130 	props->active_mtu = ib_mtu_int_to_enum(if_getmtu(netdev));
2131 	props->lid = 1;
2132 	props->lmc = 0;
2133 	props->sm_lid = 0;
2134 	props->sm_sl = 0;
2135 	if ((if_getlinkstate(netdev) == LINK_STATE_UP) &&
2136 	    (if_getdrvflags(netdev) & IFF_DRV_RUNNING)) {
2137 		props->state = IB_PORT_ACTIVE;
2138 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
2139 	} else {
2140 		props->state = IB_PORT_DOWN;
2141 		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
2142 	}
2143 	ib_get_eth_speed(ibdev, port, &props->active_speed, &props->active_width);
2144 
2145 	if (rdma_protocol_roce(ibdev, 1)) {
2146 		props->gid_tbl_len = 32;
2147 		props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
2148 		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
2149 	} else {
2150 		props->gid_tbl_len = 1;
2151 	}
2152 	props->qkey_viol_cntr = 0;
2153 	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
2154 	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
2155 
2156 	return 0;
2157 }
2158 
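/* HW statistics names, indexed by the IRDMA_HW_STAT_INDEX_* values */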
2159 static const char *const irdma_hw_stat_names[] = {
2160 	/* gen1 - 32-bit */
2161 	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
2162 	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
2163 	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
2164 	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
2165 	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
2166 	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
2167 	[IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
2168 	/* gen1 - 64-bit */
2169 	[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
2170 	[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
2171 	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
2172 	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
2173 	[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
2174 	[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
2175 	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
2176 	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
2177 	[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
2178 	[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
2179 	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
2180 	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
2181 	[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
2182 	[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
2183 	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
2184 	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
2185 	[IRDMA_HW_STAT_INDEX_RDMARXRDS] = "InRdmaReads",
2186 	[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "InRdmaSends",
2187 	[IRDMA_HW_STAT_INDEX_RDMARXWRS] = "InRdmaWrites",
2188 	[IRDMA_HW_STAT_INDEX_RDMATXRDS] = "OutRdmaReads",
2189 	[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "OutRdmaSends",
2190 	[IRDMA_HW_STAT_INDEX_RDMATXWRS] = "OutRdmaWrites",
2191 	[IRDMA_HW_STAT_INDEX_RDMAVBND] = "RdmaBnd",
2192 	[IRDMA_HW_STAT_INDEX_RDMAVINV] = "RdmaInv",
2193 
2194 	/* gen2 - 32-bit */
2195 	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
2196 	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
2197 	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
2198 	/* gen2 - 64-bit */
2199 	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
2200 	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
2201 	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
2202 	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
2203 	[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
2204 	[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
2205 	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
2206 	[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "RetransSegs",
2207 	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "InOptErrors",
2208 	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "InProtoErrors",
2209 	[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "InSegs",
2210 	[IRDMA_HW_STAT_INDEX_TCPTXSEG] = "OutSegs",
2211 };
2212 
2213 /**
2214  * irdma_alloc_hw_stats - Allocate a hw stats structure
2215  * @ibdev: device pointer from stack
2216  * @port_num: port number
2217  */
2218 struct rdma_hw_stats *
2219 irdma_alloc_hw_stats(struct ib_device *ibdev,
2220 		     u8 port_num)
2221 {
2222 	struct irdma_device *iwdev = to_iwdev(ibdev);
2223 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
2224 
2225 	int num_counters = dev->hw_attrs.max_stat_idx;
2226 	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
2227 
2228 	return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
2229 					  lifespan);
2230 }
2231 
2232 /**
2233  * irdma_get_hw_stats - Populates the rdma_hw_stats structure
2234  * @ibdev: device pointer from stack
2235  * @stats: stats pointer from stack
2236  * @port_num: port number
2237  * @index: which hw counter the stack is requesting we update
2238  */
2239 int
2240 irdma_get_hw_stats(struct ib_device *ibdev,
2241 		   struct rdma_hw_stats *stats, u8 port_num,
2242 		   int index)
2243 {
2244 	struct irdma_device *iwdev = to_iwdev(ibdev);
2245 	struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
2246 
2247 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
2248 		irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
2249 
2250 	memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
2251 
2252 	return stats->num_counters;
2253 }
2254 
2255 /**
2256  * irdma_query_gid - Query port GID
2257  * @ibdev: device pointer from stack
2258  * @port: port number
2259  * @index: Entry index
2260  * @gid: Global ID
2261  */
2262 int
2263 irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
2264 		union ib_gid *gid)
2265 {
2266 	struct irdma_device *iwdev = to_iwdev(ibdev);
2267 
2268 	memset(gid->raw, 0, sizeof(gid->raw));
2269 	ether_addr_copy(gid->raw, if_getlladdr(iwdev->netdev));
2270 
2271 	return 0;
2272 }
2273 
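/**
 * irdma_get_link_layer - report the port link layer (always Ethernet)
 * @ibdev: device pointer from stack
 * @port_num: port number
 */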
2274 enum rdma_link_layer
2275 irdma_get_link_layer(struct ib_device *ibdev,
2276 		     u8 port_num)
2277 {
2278 	return IB_LINK_LAYER_ETHERNET;
2279 }
2280 
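/**
 * ib_mtu_int_to_enum - convert an integer MTU to the largest ib_mtu enum that fits
 * @mtu: MTU in bytes
 */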
2281 inline enum ib_mtu
2282 ib_mtu_int_to_enum(int mtu)
2283 {
2284 	if (mtu >= 4096)
2285 		return IB_MTU_4096;
2286 	else if (mtu >= 2048)
2287 		return IB_MTU_2048;
2288 	else if (mtu >= 1024)
2289 		return IB_MTU_1024;
2290 	else if (mtu >= 512)
2291 		return IB_MTU_512;
2292 	else
2293 		return IB_MTU_256;
2294 }
2295 
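/**
 * kc_set_roce_uverbs_cmd_mask - add RoCE-only uverbs commands (AH and
 * multicast) to the device command mask
 * @iwdev: irdma device
 */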
2296 inline void
2297 kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev)
2298 {
2299 	iwdev->ibdev.uverbs_cmd_mask |=
2300 	    BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
2301 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
2302 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
2303 	    BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST);
2304 }
2305 
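/**
 * kc_set_rdma_uverbs_cmd_mask - set the base uverbs and uverbs_ex command
 * masks supported by the device
 * @iwdev: irdma device
 */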
2306 inline void
2307 kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
2308 {
2309 	iwdev->ibdev.uverbs_cmd_mask =
2310 	    BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
2311 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
2312 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
2313 	    BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
2314 	    BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
2315 	    BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
2316 	    BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
2317 	    BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
2318 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2319 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
2320 	    BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) |
2321 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
2322 	    BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2323 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
2324 	    BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
2325 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
2326 	    BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
2327 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
2328 	    BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
2329 	    BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
2330 	    BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
2331 	    BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
2332 	    BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
2333 	iwdev->ibdev.uverbs_ex_cmd_mask =
2334 	    BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) |
2335 	    BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
2336 
2337 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
2338 		iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);
2339 }
2340 
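/**
 * ib_get_eth_speed - derive IB speed and width from the netdev baudrate
 * @ibdev: device pointer from stack
 * @port_num: port number
 * @speed: filled with the equivalent IB speed
 * @width: filled with the equivalent IB link width
 */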
2341 int
2342 ib_get_eth_speed(struct ib_device *ibdev, u32 port_num, u8 *speed, u8 *width)
2343 {
2344 	if_t netdev = ibdev->get_netdev(ibdev, port_num);
2345 	u32 netdev_speed;
2346 
2347 	if (!netdev)
2348 		return -ENODEV;
2349 
2350 	netdev_speed = if_getbaudrate(netdev);
2351 	dev_put(netdev);
2352 	if (netdev_speed <= SPEED_1000) {
2353 		*width = IB_WIDTH_1X;
2354 		*speed = IB_SPEED_SDR;
2355 	} else if (netdev_speed <= SPEED_10000) {
2356 		*width = IB_WIDTH_1X;
2357 		*speed = IB_SPEED_FDR10;
2358 	} else if (netdev_speed <= SPEED_20000) {
2359 		*width = IB_WIDTH_4X;
2360 		*speed = IB_SPEED_DDR;
2361 	} else if (netdev_speed <= SPEED_25000) {
2362 		*width = IB_WIDTH_1X;
2363 		*speed = IB_SPEED_EDR;
2364 	} else if (netdev_speed <= SPEED_40000) {
2365 		*width = IB_WIDTH_4X;
2366 		*speed = IB_SPEED_FDR10;
2367 	} else {
2368 		*width = IB_WIDTH_4X;
2369 		*speed = IB_SPEED_EDR;
2370 	}
2371 
2372 	return 0;
2373 }
2374 
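/**
 * irdma_mac_to_u64 - pack a 6-byte MAC address into a u64, MSB first
 * @eth_add: Ethernet address
 */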
2375 u64
2376 irdma_mac_to_u64(const u8 *eth_add)
2377 {
2378 	int idx;
2379 	u64 u64_eth_add;
2380 
2381 	for (idx = 0, u64_eth_add = 0; idx < ETHER_ADDR_LEN; idx++)
2382 		u64_eth_add = u64_eth_add << 8 | eth_add[idx];
2383 
2384 	return u64_eth_add;
2385 }
2386