xref: /freebsd/sys/dev/irdma/irdma_kcompat.c (revision d9a42747950146bf03cda7f6e25d219253f8a57a)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2018 - 2022 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 /*$FreeBSD$*/
35 
36 #include "irdma_main.h"
37 
38 #define IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
39 
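/*
 * kc_rdma_flow_label_to_udp_sport - fold a flow label into a RoCEv2 UDP sport
 * @fl: 20-bit IPv6 flow label
 *
 * XORs the upper 6 bits of the flow label into the lower 14 bits and ORs in
 * 0xC000, so the result always falls in the valid RoCEv2 source-port range
 * 0xC000 - 0xFFFF.
 */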
40 static u16 kc_rdma_flow_label_to_udp_sport(u32 fl) {
41 	u32 fl_low = fl & 0x03FFF;
42 	u32 fl_high = fl & 0xFC000;
43 
44 	fl_low ^= fl_high >> 14;
45 
46 	return (u16)(fl_low | IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN);
47 }
48 
49 #define IRDMA_GRH_FLOWLABEL_MASK (0x000FFFFF)
50 
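/*
 * kc_rdma_calc_flow_label - derive a 20-bit flow label from a QP pair
 * @lqpn: local QP number
 * @rqpn: remote QP number
 *
 * Mixes the product of the two QP numbers down to 20 bits so different QP
 * pairs tend to get distinct flow labels (and hence distinct UDP source
 * ports) for path spreading.
 */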
51 static u32 kc_rdma_calc_flow_label(u32 lqpn, u32 rqpn) {
52 	u64 fl = (u64)lqpn * rqpn;
53 
54 	fl ^= fl >> 20;
55 	fl ^= fl >> 40;
56 
57 	return (u32)(fl & IRDMA_GRH_FLOWLABEL_MASK);
58 }
59 
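/*
 * kc_rdma_get_udp_sport - pick the RoCEv2 UDP source port for a connection
 * @fl: flow label from the GRH, or 0 if none was supplied
 * @lqpn: local QP number
 * @rqpn: remote QP number
 *
 * If no flow label is given, one is first derived from the QP numbers.
 * Illustrative example: fl = 0, lqpn = 0x11, rqpn = 0x22 gives
 * fl = 0x11 * 0x22 = 0x242, which maps to source port 0xC242.
 */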
60 u16
61 kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
62 {
63 	if (!fl)
64 		fl = kc_rdma_calc_flow_label(lqpn, rqpn);
65 	return kc_rdma_flow_label_to_udp_sport(fl);
66 }
67 
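/*
 * irdma_get_dev_fw_str - format the firmware version as "major.minor"
 * @dev: ib device
 * @str: output buffer
 * @str_len: size of @str
 */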
68 void
69 irdma_get_dev_fw_str(struct ib_device *dev,
70 		     char *str,
71 		     size_t str_len)
72 {
73 	struct irdma_device *iwdev = to_iwdev(dev);
74 
75 	snprintf(str, str_len, "%u.%u",
76 		 irdma_fw_major_ver(&iwdev->rf->sc_dev),
77 		 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
78 }
79 
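/*
 * irdma_add_gid/irdma_del_gid - no-op GID table callbacks. The driver keeps
 * no per-entry GID state of its own; GIDs are read back through the core GID
 * cache (ib_get_cached_gid()) when needed, so add/del simply succeed.
 */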
80 int
81 irdma_add_gid(struct ib_device *device,
82 	      u8 port_num,
83 	      unsigned int index,
84 	      const union ib_gid *gid,
85 	      const struct ib_gid_attr *attr,
86 	      void **context)
87 {
88 	return 0;
89 }
90 
91 int
92 irdma_del_gid(struct ib_device *device,
93 	      u8 port_num,
94 	      unsigned int index,
95 	      void **context)
96 {
97 	return 0;
98 }
99 
100 #if __FreeBSD_version >= 1400026
101 /**
102  * irdma_alloc_mr - register stag for fast memory registration
103  * @pd: ibpd pointer
104  * @mr_type: type of memory for stag registration
105  * @max_num_sg: max number of pages
106  * @udata: user data
107  */
108 struct ib_mr *
109 irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
110 	       u32 max_num_sg, struct ib_udata *udata)
111 {
112 #else
113 /**
114  * irdma_alloc_mr - register stag for fast memory registration
115  * @pd: ibpd pointer
116  * @mr_type: type of memory for stag registration
117  * @max_num_sg: max number of pages
118  */
119 struct ib_mr *
120 irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
121 	       u32 max_num_sg)
122 {
123 #endif
124 	struct irdma_device *iwdev = to_iwdev(pd->device);
125 	struct irdma_pble_alloc *palloc;
126 	struct irdma_pbl *iwpbl;
127 	struct irdma_mr *iwmr;
128 	int status;
129 	u32 stag;
130 	int err_code = -ENOMEM;
131 
132 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
133 	if (!iwmr)
134 		return ERR_PTR(-ENOMEM);
135 
136 	stag = irdma_create_stag(iwdev);
137 	if (!stag) {
138 		err_code = -ENOMEM;
139 		goto err;
140 	}
141 
142 	iwmr->stag = stag;
143 	iwmr->ibmr.rkey = stag;
144 	iwmr->ibmr.lkey = stag;
145 	iwmr->ibmr.pd = pd;
146 	iwmr->ibmr.device = pd->device;
147 	iwpbl = &iwmr->iwpbl;
148 	iwpbl->iwmr = iwmr;
149 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
150 	palloc = &iwpbl->pble_alloc;
151 	iwmr->page_cnt = max_num_sg;
152 	/* Assume system PAGE_SIZE as the sg page sizes are unknown. */
153 	iwmr->len = max_num_sg * PAGE_SIZE;
154 	status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
155 				false);
156 	if (status)
157 		goto err_get_pble;
158 
159 	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
160 	if (err_code)
161 		goto err_alloc_stag;
162 
163 	iwpbl->pbl_allocated = true;
164 
165 	return &iwmr->ibmr;
166 err_alloc_stag:
167 	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
168 err_get_pble:
169 	irdma_free_stag(iwdev, stag);
170 err:
171 	kfree(iwmr);
172 
173 	return ERR_PTR(err_code);
174 }
175 
176 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
177 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
178 #if __FreeBSD_version >= 1400026
179 /**
180  * irdma_alloc_ucontext - Allocate the user context data structure
181  * @uctx: context
182  * @udata: user data
183  *
184  * This keeps track of all objects associated with a particular
185  * user-mode client.
186  */
187 int
188 irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
189 {
190 	struct ib_device *ibdev = uctx->device;
191 	struct irdma_device *iwdev = to_iwdev(ibdev);
192 	struct irdma_alloc_ucontext_req req = {0};
193 	struct irdma_alloc_ucontext_resp uresp = {0};
194 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
195 	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
196 
197 	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
198 	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
199 		return -EINVAL;
200 
201 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
202 		return -EINVAL;
203 
204 	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
205 		goto ver_error;
206 
207 	ucontext->iwdev = iwdev;
208 	ucontext->abi_ver = req.userspace_ver;
209 
210 	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
211 		ucontext->use_raw_attrs = true;
212 
213 	/* GEN_1 support for libi40iw */
214 	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
215 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
216 			return -EOPNOTSUPP;
217 
218 		ucontext->legacy_mode = true;
219 		uresp.max_qps = iwdev->rf->max_qp;
220 		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
221 		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
222 		uresp.kernel_ver = req.userspace_ver;
223 		if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen)))
224 			return -EFAULT;
225 	} else {
226 		u64 bar_off;
227 
228 		uresp.kernel_ver = IRDMA_ABI_VER;
229 		uresp.feature_flags = uk_attrs->feature_flags;
230 		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
231 		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
232 		uresp.max_hw_inline = uk_attrs->max_hw_inline;
233 		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
234 		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
235 		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
236 		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
237 		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
238 		uresp.hw_rev = uk_attrs->hw_rev;
239 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
240 
241 		bar_off =
242 		    (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
243 		ucontext->db_mmap_entry =
244 		    irdma_user_mmap_entry_insert(ucontext, bar_off,
245 						 IRDMA_MMAP_IO_NC,
246 						 &uresp.db_mmap_key);
247 		if (!ucontext->db_mmap_entry) {
248 			return -ENOMEM;
249 		}
250 
251 		if (ib_copy_to_udata(udata, &uresp,
252 				     min(sizeof(uresp), udata->outlen))) {
253 			rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
254 			return -EFAULT;
255 		}
256 	}
257 
258 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
259 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
260 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
261 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
262 	INIT_LIST_HEAD(&ucontext->vma_list);
263 	mutex_init(&ucontext->vma_list_mutex);
264 
265 	return 0;
266 
267 ver_error:
268 	irdma_dev_err(&iwdev->ibdev,
269 		      "Invalid userspace driver version detected. Detected version %d, should be %d\n",
270 		      req.userspace_ver, IRDMA_ABI_VER);
271 	return -EINVAL;
272 }
273 #endif
274 
275 #if __FreeBSD_version < 1400026
276 /**
277  * irdma_alloc_ucontext - Allocate the user context data structure
278  * @ibdev: ib device pointer
279  * @udata: user data
280  *
281  * This keeps track of all objects associated with a particular
282  * user-mode client.
283  */
284 struct ib_ucontext *
285 irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
286 {
287 	struct irdma_device *iwdev = to_iwdev(ibdev);
288 	struct irdma_alloc_ucontext_req req = {0};
289 	struct irdma_alloc_ucontext_resp uresp = {0};
290 	struct irdma_ucontext *ucontext;
291 	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
292 
293 	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
294 	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
295 		return ERR_PTR(-EINVAL);
296 
297 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
298 		return ERR_PTR(-EINVAL);
299 
300 	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
301 		goto ver_error;
302 
303 	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
304 	if (!ucontext)
305 		return ERR_PTR(-ENOMEM);
306 
307 	ucontext->iwdev = iwdev;
308 	ucontext->abi_ver = req.userspace_ver;
309 
310 	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
311 		ucontext->use_raw_attrs = true;
312 
313 	/* GEN_1 legacy support with libi40iw */
314 	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
315 		if (uk_attrs->hw_rev != IRDMA_GEN_1) {
316 			kfree(ucontext);
317 			return ERR_PTR(-EOPNOTSUPP);
318 		}
319 
320 		ucontext->legacy_mode = true;
321 		uresp.max_qps = iwdev->rf->max_qp;
322 		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
323 		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
324 		uresp.kernel_ver = req.userspace_ver;
325 		if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) {
326 			kfree(ucontext);
327 			return ERR_PTR(-EFAULT);
328 		}
329 	} else {
330 		u64 bar_off;
331 
332 		uresp.kernel_ver = IRDMA_ABI_VER;
333 		uresp.feature_flags = uk_attrs->feature_flags;
334 		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
335 		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
336 		uresp.max_hw_inline = uk_attrs->max_hw_inline;
337 		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
338 		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
339 		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
340 		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
341 		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
342 		uresp.hw_rev = uk_attrs->hw_rev;
343 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
344 
345 		bar_off =
346 		    (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
347 
348 		spin_lock_init(&ucontext->mmap_tbl_lock);
349 		ucontext->db_mmap_entry =
350 		    irdma_user_mmap_entry_add_hash(ucontext, bar_off,
351 						   IRDMA_MMAP_IO_NC,
352 						   &uresp.db_mmap_key);
353 		if (!ucontext->db_mmap_entry) {
354 			spin_lock_destroy(&ucontext->mmap_tbl_lock);
355 			kfree(ucontext);
356 			return ERR_PTR(-ENOMEM);
357 		}
358 
359 		if (ib_copy_to_udata(udata, &uresp,
360 				     min(sizeof(uresp), udata->outlen))) {
361 			irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
362 			spin_lock_destroy(&ucontext->mmap_tbl_lock);
363 			kfree(ucontext);
364 			return ERR_PTR(-EFAULT);
365 		}
366 	}
367 
368 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
369 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
370 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
371 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
372 	INIT_LIST_HEAD(&ucontext->vma_list);
373 	mutex_init(&ucontext->vma_list_mutex);
374 
375 	return &ucontext->ibucontext;
376 
377 ver_error:
378 	irdma_dev_err(&iwdev->ibdev,
379 		      "Invalid userspace driver version detected. Detected version %d, should be %d\n",
380 		      req.userspace_ver, IRDMA_ABI_VER);
381 	return ERR_PTR(-EINVAL);
382 }
383 #endif
384 
385 #if __FreeBSD_version >= 1400026
386 /**
387  * irdma_dealloc_ucontext - deallocate the user context data structure
388  * @context: user context created during alloc
389  */
390 void
391 irdma_dealloc_ucontext(struct ib_ucontext *context)
392 {
393 	struct irdma_ucontext *ucontext = to_ucontext(context);
394 
395 	rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
396 
397 	return;
398 }
399 #endif
400 
401 #if __FreeBSD_version < 1400026
402 /**
403  * irdma_dealloc_ucontext - deallocate the user context data structure
404  * @context: user context created during alloc
405  */
406 int
407 irdma_dealloc_ucontext(struct ib_ucontext *context)
408 {
409 	struct irdma_ucontext *ucontext = to_ucontext(context);
410 
411 	irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
412 	spin_lock_destroy(&ucontext->mmap_tbl_lock);
413 	kfree(ucontext);
414 
415 	return 0;
416 }
417 #endif
418 
419 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
420 #if __FreeBSD_version >= 1400026
421 /**
422  * irdma_alloc_pd - allocate protection domain
423  * @pd: protection domain
424  * @udata: user data
425  */
426 int
427 irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
428 {
429 	struct irdma_pd *iwpd = to_iwpd(pd);
430 	struct irdma_device *iwdev = to_iwdev(pd->device);
431 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
432 	struct irdma_pci_f *rf = iwdev->rf;
433 	struct irdma_alloc_pd_resp uresp = {0};
434 	struct irdma_sc_pd *sc_pd;
435 	u32 pd_id = 0;
436 	int err;
437 
438 	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
439 		return -EINVAL;
440 
441 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
442 			       &rf->next_pd);
443 	if (err)
444 		return err;
445 
446 	sc_pd = &iwpd->sc_pd;
447 	if (udata) {
448 		struct irdma_ucontext *ucontext =
449 		rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
450 
451 		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
452 		uresp.pd_id = pd_id;
453 		if (ib_copy_to_udata(udata, &uresp,
454 				     min(sizeof(uresp), udata->outlen))) {
455 			err = -EFAULT;
456 			goto error;
457 		}
458 	} else {
459 		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
460 	}
461 
462 	spin_lock_init(&iwpd->udqp_list_lock);
463 	INIT_LIST_HEAD(&iwpd->udqp_list);
464 
465 	return 0;
466 
467 error:
468 
469 	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
470 
471 	return err;
472 }
473 #endif
474 
475 #if __FreeBSD_version < 1400026
476 /**
477  * irdma_alloc_pd - allocate protection domain
478  * @ibdev: IB device
479  * @context: user context
480  * @udata: user data
481  */
482 struct ib_pd *
483 irdma_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata)
484 {
485 	struct irdma_pd *iwpd;
486 	struct irdma_device *iwdev = to_iwdev(ibdev);
487 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
488 	struct irdma_pci_f *rf = iwdev->rf;
489 	struct irdma_alloc_pd_resp uresp = {0};
490 	struct irdma_sc_pd *sc_pd;
491 	u32 pd_id = 0;
492 	int err;
493 
494 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
495 			       &rf->next_pd);
496 	if (err)
497 		return ERR_PTR(err);
498 
499 	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
500 	if (!iwpd) {
501 		err = -ENOMEM;
502 		goto free_res;
503 	}
504 
505 	sc_pd = &iwpd->sc_pd;
506 	if (udata) {
507 		struct irdma_ucontext *ucontext = to_ucontext(context);
508 
509 		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
510 		uresp.pd_id = pd_id;
511 		if (ib_copy_to_udata(udata, &uresp,
512 				     min(sizeof(uresp), udata->outlen))) {
513 			err = -EFAULT;
514 			goto error;
515 		}
516 	} else {
517 		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
518 	}
519 
520 	spin_lock_init(&iwpd->udqp_list_lock);
521 	INIT_LIST_HEAD(&iwpd->udqp_list);
522 
523 	return &iwpd->ibpd;
524 
525 error:
526 	kfree(iwpd);
527 free_res:
528 
529 	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
530 
531 	return ERR_PTR(err);
532 }
533 
534 #endif
535 
536 #if __FreeBSD_version >= 1400026
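/**
 * irdma_dealloc_pd - deallocate protection domain
 * @ibpd: PD being freed
 * @udata: user data
 *
 * On this kernel version the ib_pd container is allocated and freed by the
 * core, so only the PD id is returned to the free pool here (contrast with
 * the pre-1400026 variant below, which also frees the irdma_pd).
 */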
537 void
538 irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
539 {
540 	struct irdma_pd *iwpd = to_iwpd(ibpd);
541 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
542 
543 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
544 }
545 
546 #endif
547 
548 #if __FreeBSD_version < 1400026
549 int
550 irdma_dealloc_pd(struct ib_pd *ibpd)
551 {
552 	struct irdma_pd *iwpd = to_iwpd(ibpd);
553 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
554 
555 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
556 	kfree(iwpd);
557 	return 0;
558 }
559 #endif
560 
561 /**
562  * irdma_find_qp_update_qs - update QS handle for UD QPs
563  * @rf: RDMA PCI function
564  * @pd: protection domain object
565  * @user_pri: selected user priority
566  */
567 static void
568 irdma_find_qp_update_qs(struct irdma_pci_f *rf,
569 			struct irdma_pd *pd, u8 user_pri)
570 {
571 	struct irdma_qp *iwqp;
572 	struct list_head *tmp_node, *list_node;
573 	struct irdma_udqs_work *work;
574 	unsigned long flags;
575 	bool qs_change;
576 
577 	spin_lock_irqsave(&pd->udqp_list_lock, flags);
578 	list_for_each_safe(list_node, tmp_node, &pd->udqp_list) {
579 		qs_change = true;
580 		iwqp = list_entry(list_node, struct irdma_qp, ud_list_elem);
581 		irdma_qp_add_ref(&iwqp->ibqp);
582 		/* check if qs_handle needs to be changed */
583 		if (iwqp->sc_qp.qs_handle == iwqp->sc_qp.vsi->qos[user_pri].qs_handle) {
584 			if (iwqp->ctx_info.user_pri == user_pri) {
585 				/* qs_handle and user_pri don't change */
586 				irdma_qp_rem_ref(&iwqp->ibqp);
587 				continue;
588 			}
589 			qs_change = false;
590 		}
591 		/* perform qp qos change */
592 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
593 		if (!work) {
594 			irdma_qp_rem_ref(&iwqp->ibqp);
595 			spin_unlock_irqrestore(&pd->udqp_list_lock, flags);
596 			return;
597 		}
598 		work->iwqp = iwqp;
599 		work->user_prio = user_pri;
600 		work->qs_change = qs_change;
601 		INIT_WORK(&work->work, irdma_udqp_qs_worker);
602 		if (qs_change)
603 			irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND);
604 		queue_work(rf->iwdev->cleanup_wq, &work->work);
605 	}
606 	spin_unlock_irqrestore(&pd->udqp_list_lock, flags);
607 }
608 
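/**
 * irdma_fill_ah_info - fill AH info from the resolved GID addresses
 * @vnet: vnet of the net device, needed for the IPv4 loopback check
 * @ah_info: AH info to fill
 * @sgid_attr: source GID attributes
 * @sgid_addr: source address converted from the source GID
 * @dgid_addr: destination address converted from the destination GID
 * @dmac: destination MAC, rewritten to a multicast MAC for multicast dests
 * @net_type: RDMA_NETWORK_IPV4 selects the IPv4 path, otherwise IPv6 is used
 */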
609 static void
610 irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info,
611 		   const struct ib_gid_attr *sgid_attr,
612 		   struct sockaddr *sgid_addr, struct sockaddr *dgid_addr,
613 		   u8 *dmac, u8 net_type)
614 {
615 	if (net_type == RDMA_NETWORK_IPV4) {
616 		ah_info->ipv4_valid = true;
617 		ah_info->dest_ip_addr[0] =
618 		    ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr);
619 		ah_info->src_ip_addr[0] =
620 		    ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr);
621 		CURVNET_SET_QUIET(vnet);
622 		ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
623 						     ah_info->dest_ip_addr[0]);
624 		CURVNET_RESTORE();
625 		if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) {
626 			irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac);
627 		}
628 	} else {
629 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
630 				    ((struct sockaddr_in6 *)dgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
631 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
632 				    ((struct sockaddr_in6 *)sgid_addr)->sin6_addr.__u6_addr.__u6_addr32);
633 		ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
634 						     ah_info->dest_ip_addr);
635 		if (rdma_is_multicast_addr(&((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) {
636 			irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac);
637 		}
638 	}
639 }
640 
641 static inline u8 irdma_get_vlan_ndev_prio(struct ifnet *ndev, u8 prio){
642 	return prio;
643 }
644 
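/**
 * irdma_create_ah_vlan_tag - resolve VLAN tag, priority and ARP entry for an AH
 * @iwdev: irdma device
 * @pd: protection domain whose UD QPs may need a QS handle update
 * @ah_info: AH info being built
 * @sgid_attr: source GID attributes, used to find the VLAN net device
 * @dmac: destination MAC used to populate the ARP cache
 *
 * Returns 0 on success or -EINVAL if no ARP entry could be added.
 */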
645 static int
646 irdma_create_ah_vlan_tag(struct irdma_device *iwdev,
647 			 struct irdma_pd *pd,
648 			 struct irdma_ah_info *ah_info,
649 			 const struct ib_gid_attr *sgid_attr,
650 			 u8 *dmac)
651 {
652 	u16 vlan_prio;
653 
654 	if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev))
655 		ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev);
656 	else
657 		ah_info->vlan_tag = VLAN_N_VID;
658 
659 	ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, dmac);
660 
661 	if (ah_info->dst_arpindex == -1)
662 		return -EINVAL;
663 
664 	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
665 		ah_info->vlan_tag = 0;
666 
667 	if (ah_info->vlan_tag < VLAN_N_VID) {
668 		struct ifnet *ndev = sgid_attr->ndev;
669 
670 		ah_info->insert_vlan_tag = true;
671 		vlan_prio = (u16)irdma_get_vlan_ndev_prio(ndev, rt_tos2priority(ah_info->tc_tos));
672 		ah_info->vlan_tag |= vlan_prio << VLAN_PRIO_SHIFT;
673 		irdma_find_qp_update_qs(iwdev->rf, pd, vlan_prio);
674 	}
675 	if (iwdev->roce_dcqcn_en) {
676 		ah_info->tc_tos &= ~ECN_CODE_PT_MASK;
677 		ah_info->tc_tos |= ECN_CODE_PT_VAL;
678 	}
679 
680 	return 0;
681 }
682 
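/*
 * irdma_create_ah_wait - poll for AH creation completion when sleeping is not
 * allowed. The CCQ is polled until ah_valid is set or the timeout expires;
 * sleepable requests are already waited on by the CQP layer, so nothing is
 * done for them here.
 */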
683 static int
684 irdma_create_ah_wait(struct irdma_pci_f *rf,
685 		     struct irdma_sc_ah *sc_ah, bool sleep)
686 {
687 	if (!sleep) {
688 		int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
689 		CQP_TIMEOUT_THRESHOLD;
690 
691 		do {
692 			irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
693 			mdelay(1);
694 		} while (!sc_ah->ah_info.ah_valid && --cnt);
695 
696 		if (!cnt)
697 			return -ETIMEDOUT;
698 	}
699 	return 0;
700 }
701 
702 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
703 
704 #if __FreeBSD_version >= 1400026
705 /**
706  * irdma_create_ah - create address handle
707  * @ib_ah: ptr to AH
708  * @attr: address handle attributes
709  * @flags: AH creation flags; RDMA_CREATE_AH_SLEEPABLE indicates the call may sleep
710  * @udata: user data
711  *
712  * returns 0 on success, error otherwise
713  */
714 int
715 irdma_create_ah(struct ib_ah *ib_ah,
716 		struct ib_ah_attr *attr, u32 flags,
717 		struct ib_udata *udata)
718 {
719 	struct irdma_pd *pd = to_iwpd(ib_ah->pd);
720 	struct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah);
721 	struct irdma_device *iwdev = to_iwdev(ib_ah->pd->device);
722 	union ib_gid sgid;
723 	struct ib_gid_attr sgid_attr;
724 	struct irdma_pci_f *rf = iwdev->rf;
725 	struct irdma_sc_ah *sc_ah;
726 	u32 ah_id = 0;
727 	struct irdma_ah_info *ah_info;
728 	struct irdma_create_ah_resp uresp;
729 	union {
730 		struct sockaddr saddr;
731 		struct sockaddr_in saddr_in;
732 		struct sockaddr_in6 saddr_in6;
733 	} sgid_addr, dgid_addr;
734 	int err;
735 	u8 dmac[ETH_ALEN];
736 	bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0;
737 
738 	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
739 		return -EINVAL;
740 
741 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
742 			       rf->max_ah, &ah_id, &rf->next_ah);
743 
744 	if (err)
745 		return err;
746 
747 	ah->pd = pd;
748 	sc_ah = &ah->sc_ah;
749 	sc_ah->ah_info.ah_idx = ah_id;
750 	sc_ah->ah_info.vsi = &iwdev->vsi;
751 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
752 	ah->sgid_index = attr->grh.sgid_index;
753 	memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
754 	rcu_read_lock();
755 	err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
756 				attr->grh.sgid_index, &sgid, &sgid_attr);
757 	rcu_read_unlock();
758 	if (err) {
759 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
760 			    "GID lookup at idx=%d with port=%d failed\n",
761 			    attr->grh.sgid_index, attr->port_num);
762 		err = -EINVAL;
763 		goto err_gid_l2;
764 	}
765 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
766 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
767 	ah->av.attrs = *attr;
768 	ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
769 							sgid_attr.gid_type,
770 							&sgid);
771 
772 	if (sgid_attr.ndev)
773 		dev_put(sgid_attr.ndev);
774 
775 	ah->av.sgid_addr.saddr = sgid_addr.saddr;
776 	ah->av.dgid_addr.saddr = dgid_addr.saddr;
777 	ah_info = &sc_ah->ah_info;
778 	ah_info->ah_idx = ah_id;
779 	ah_info->pd_idx = pd->sc_pd.pd_id;
780 	ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));
781 
782 	if (attr->ah_flags & IB_AH_GRH) {
783 		ah_info->flow_label = attr->grh.flow_label;
784 		ah_info->hop_ttl = attr->grh.hop_limit;
785 		ah_info->tc_tos = attr->grh.traffic_class;
786 	}
787 
788 	ether_addr_copy(dmac, attr->dmac);
789 
790 	irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
791 			   dmac, ah->av.net_type);
792 
793 	err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac);
794 	if (err)
795 		goto err_gid_l2;
796 
797 	err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
798 			      sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
799 	if (err) {
800 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP-OP Create AH fail");
801 		goto err_gid_l2;
802 	}
803 
804 	err = irdma_create_ah_wait(rf, sc_ah, sleep);
805 	if (err) {
806 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
807 		goto err_gid_l2;
808 	}
809 
810 	if (udata) {
811 		uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
812 		err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
813 		if (err) {
814 			irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
815 					IRDMA_OP_AH_DESTROY, false, NULL, ah);
816 			goto err_gid_l2;
817 		}
818 	}
819 
820 	return 0;
821 err_gid_l2:
822 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
823 
824 	return err;
825 }
826 #endif
827 
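/*
 * irdma_ether_copy - copy the destination MAC from the AH attributes to @dmac
 */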
828 void
829 irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
830 {
831 	ether_addr_copy(dmac, attr->dmac);
832 }
833 
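/*
 * Stub AH verbs: create fails with -ENOSYS and destroy is a no-op. These are
 * presumably installed for devices/modes that do not support address handles.
 */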
834 #if __FreeBSD_version < 1400026
835 struct ib_ah *
836 irdma_create_ah_stub(struct ib_pd *ibpd,
837 		     struct ib_ah_attr *attr,
838 		     struct ib_udata *udata)
839 #else
840 int
841 irdma_create_ah_stub(struct ib_ah *ib_ah,
842 		     struct ib_ah_attr *attr, u32 flags,
843 		     struct ib_udata *udata)
844 #endif
845 {
846 #if __FreeBSD_version >= 1400026
847 	return -ENOSYS;
848 #else
849 	return ERR_PTR(-ENOSYS);
850 #endif
851 }
852 
853 #if __FreeBSD_version >= 1400026
854 void
855 irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags)
856 {
857 	return;
858 }
859 #else
860 int
861 irdma_destroy_ah_stub(struct ib_ah *ibah)
862 {
863 	return -ENOSYS;
864 }
865 #endif
866 
867 #if __FreeBSD_version < 1400026
868 /**
869  * irdma_create_ah - create address handle
870  * @ibpd: ptr to pd
871  * @attr: address handle attributes
872  * @udata: user data
873  *
874  * returns a pointer to an address handle
875  */
876 struct ib_ah *
877 irdma_create_ah(struct ib_pd *ibpd,
878 		struct ib_ah_attr *attr,
879 		struct ib_udata *udata)
880 {
881 	struct irdma_pd *pd = to_iwpd(ibpd);
882 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
883 	struct irdma_ah *ah;
884 	union ib_gid sgid;
885 	struct ib_gid_attr sgid_attr;
886 	struct irdma_pci_f *rf = iwdev->rf;
887 	struct irdma_sc_ah *sc_ah;
888 	u32 ah_id = 0;
889 	struct irdma_ah_info *ah_info;
890 	struct irdma_create_ah_resp uresp;
891 	union {
892 		struct sockaddr saddr;
893 		struct sockaddr_in saddr_in;
894 		struct sockaddr_in6 saddr_in6;
895 	} sgid_addr, dgid_addr;
896 	int err;
897 	u8 dmac[ETH_ALEN];
898 	bool sleep = udata ? true : false;
899 
900 	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
901 		return ERR_PTR(-EINVAL);
902 
903 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
904 			       rf->max_ah, &ah_id, &rf->next_ah);
905 
906 	if (err)
907 		return ERR_PTR(err);
908 
909 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
910 	if (!ah) {
911 		irdma_free_rsrc(rf, rf->allocated_ahs, ah_id);
912 		return ERR_PTR(-ENOMEM);
913 	}
914 
915 	ah->pd = pd;
916 	sc_ah = &ah->sc_ah;
917 	sc_ah->ah_info.ah_idx = ah_id;
918 	sc_ah->ah_info.vsi = &iwdev->vsi;
919 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
920 	ah->sgid_index = attr->grh.sgid_index;
921 	memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
922 	rcu_read_lock();
923 	err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
924 				attr->grh.sgid_index, &sgid, &sgid_attr);
925 	rcu_read_unlock();
926 	if (err) {
927 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
928 			    "GID lookup at idx=%d with port=%d failed\n",
929 			    attr->grh.sgid_index, attr->port_num);
930 		err = -EINVAL;
931 		goto err_gid_l2;
932 	}
933 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
934 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
935 	ah->av.attrs = *attr;
936 	ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr,
937 							sgid_attr.gid_type,
938 							&sgid);
939 
940 	if (sgid_attr.ndev)
941 		dev_put(sgid_attr.ndev);
942 
943 	ah->av.sgid_addr.saddr = sgid_addr.saddr;
944 	ah->av.dgid_addr.saddr = dgid_addr.saddr;
945 	ah_info = &sc_ah->ah_info;
946 	ah_info->ah_idx = ah_id;
947 	ah_info->pd_idx = pd->sc_pd.pd_id;
948 
949 	ether_addr_copy(ah_info->mac_addr, IF_LLADDR(iwdev->netdev));
950 	if (attr->ah_flags & IB_AH_GRH) {
951 		ah_info->flow_label = attr->grh.flow_label;
952 		ah_info->hop_ttl = attr->grh.hop_limit;
953 		ah_info->tc_tos = attr->grh.traffic_class;
954 	}
955 
956 	if (udata)
957 		ib_resolve_eth_dmac(ibpd->device, attr);
958 	irdma_ether_copy(dmac, attr);
959 
960 	irdma_fill_ah_info(iwdev->netdev->if_vnet, ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr,
961 			   dmac, ah->av.net_type);
962 
963 	err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac);
964 	if (err)
965 		goto err_gid_l2;
966 
967 	err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
968 			      sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
969 	if (err) {
970 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "CQP-OP Create AH fail");
971 		goto err_gid_l2;
972 	}
973 
974 	err = irdma_create_ah_wait(rf, sc_ah, sleep);
975 	if (err) {
976 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
977 		goto err_gid_l2;
978 	}
979 
980 	if (udata) {
981 		uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
982 		err = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
983 		if (err) {
984 			irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
985 					IRDMA_OP_AH_DESTROY, false, NULL, ah);
986 			goto err_gid_l2;
987 		}
988 	}
989 
990 	return &ah->ibah;
991 err_gid_l2:
992 	kfree(ah);
993 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
994 
995 	return ERR_PTR(err);
996 }
997 #endif
998 
999 /**
1000  * irdma_free_qp_rsrc - free up memory resources for qp
1001  * @iwqp: qp ptr (user or kernel)
1002  */
1003 void
1004 irdma_free_qp_rsrc(struct irdma_qp *iwqp)
1005 {
1006 	struct irdma_device *iwdev = iwqp->iwdev;
1007 	struct irdma_pci_f *rf = iwdev->rf;
1008 	u32 qp_num = iwqp->ibqp.qp_num;
1009 
1010 	irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
1011 	irdma_dealloc_push_page(rf, &iwqp->sc_qp);
1012 	if (iwqp->sc_qp.vsi) {
1013 		irdma_qp_rem_qos(&iwqp->sc_qp);
1014 		iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
1015 					   iwqp->sc_qp.user_pri);
1016 	}
1017 
1018 	if (qp_num > 2)
1019 		irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
1020 	irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem);
1021 	irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem);
1022 	kfree(iwqp->kqp.sig_trk_mem);
1023 	iwqp->kqp.sig_trk_mem = NULL;
1024 	kfree(iwqp->kqp.sq_wrid_mem);
1025 	kfree(iwqp->kqp.rq_wrid_mem);
1026 	kfree(iwqp->sg_list);
1027 	kfree(iwqp);
1028 }
1029 
1030 /**
1031  * irdma_create_qp - create qp
1032  * @ibpd: ptr of pd
1033  * @init_attr: attributes for qp
1034  * @udata: user data for create qp
1035  */
1036 struct ib_qp *
1037 irdma_create_qp(struct ib_pd *ibpd,
1038 		struct ib_qp_init_attr *init_attr,
1039 		struct ib_udata *udata)
1040 {
1041 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
1042 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
1043 	struct irdma_pd *iwpd = to_iwpd(ibpd);
1044 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
1045 	struct irdma_pci_f *rf = iwdev->rf;
1046 	struct irdma_qp *iwqp;
1047 	struct irdma_create_qp_resp uresp = {0};
1048 	u32 qp_num = 0;
1049 	int ret;
1050 	int err_code;
1051 	struct irdma_sc_qp *qp;
1052 	struct irdma_sc_dev *dev = &rf->sc_dev;
1053 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
1054 	struct irdma_qp_init_info init_info = {{0}};
1055 	struct irdma_qp_host_ctx_info *ctx_info;
1056 	unsigned long flags;
1057 
1058 	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
1059 	if (err_code)
1060 		return ERR_PTR(err_code);
1061 
1062 	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
1063 		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
1064 		return ERR_PTR(-EINVAL);
1065 
1066 	init_info.vsi = &iwdev->vsi;
1067 	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
1068 	init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
1069 	init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
1070 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
1071 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
1072 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
1073 
1074 	iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
1075 	if (!iwqp)
1076 		return ERR_PTR(-ENOMEM);
1077 
1078 	iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, sizeof(*iwqp->sg_list),
1079 				GFP_KERNEL);
1080 	if (!iwqp->sg_list) {
1081 		kfree(iwqp);
1082 		return ERR_PTR(-ENOMEM);
1083 	}
1084 
1085 	qp = &iwqp->sc_qp;
1086 	qp->qp_uk.back_qp = iwqp;
1087 	qp->qp_uk.lock = &iwqp->lock;
1088 	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
1089 
1090 	iwqp->iwdev = iwdev;
1091 	iwqp->q2_ctx_mem.size = IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE;
1092 	iwqp->q2_ctx_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->q2_ctx_mem,
1093 						     iwqp->q2_ctx_mem.size,
1094 						     256);
1095 	if (!iwqp->q2_ctx_mem.va) {
1096 		kfree(iwqp->sg_list);
1097 		kfree(iwqp);
1098 		return ERR_PTR(-ENOMEM);
1099 	}
1100 
1101 	init_info.q2 = iwqp->q2_ctx_mem.va;
1102 	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
1103 	init_info.host_ctx = (__le64 *) (init_info.q2 + IRDMA_Q2_BUF_SIZE);
1104 	init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
1105 
1106 	if (init_attr->qp_type == IB_QPT_GSI)
1107 		qp_num = 1;
1108 	else
1109 		err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
1110 					    &qp_num, &rf->next_qp);
1111 	if (err_code)
1112 		goto error;
1113 
1114 	iwqp->iwpd = iwpd;
1115 	iwqp->ibqp.qp_num = qp_num;
1116 	qp = &iwqp->sc_qp;
1117 	iwqp->iwscq = to_iwcq(init_attr->send_cq);
1118 	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
1119 	iwqp->host_ctx.va = init_info.host_ctx;
1120 	iwqp->host_ctx.pa = init_info.host_ctx_pa;
1121 	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
1122 
1123 	init_info.pd = &iwpd->sc_pd;
1124 	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
1125 	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
1126 		init_info.qp_uk_init_info.first_sq_wq = 1;
1127 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
1128 	init_waitqueue_head(&iwqp->waitq);
1129 	init_waitqueue_head(&iwqp->mod_qp_waitq);
1130 
1131 	if (udata) {
1132 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
1133 		err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr);
1134 	} else {
1135 		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
1136 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
1137 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
1138 	}
1139 
1140 	if (err_code) {
1141 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "setup qp failed\n");
1142 		goto error;
1143 	}
1144 
1145 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1146 		if (init_attr->qp_type == IB_QPT_RC) {
1147 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
1148 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
1149 			    IRDMA_WRITE_WITH_IMM |
1150 			    IRDMA_ROCE;
1151 		} else {
1152 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
1153 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
1154 			    IRDMA_ROCE;
1155 		}
1156 	} else {
1157 		init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
1158 		init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
1159 	}
1160 
1161 	ret = irdma_sc_qp_init(qp, &init_info);
1162 	if (ret) {
1163 		err_code = -EPROTO;
1164 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "qp_init fail\n");
1165 		goto error;
1166 	}
1167 
1168 	ctx_info = &iwqp->ctx_info;
1169 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1170 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1171 
1172 	if (rdma_protocol_roce(&iwdev->ibdev, 1))
1173 		irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
1174 	else
1175 		irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
1176 
1177 	err_code = irdma_cqp_create_qp_cmd(iwqp);
1178 	if (err_code)
1179 		goto error;
1180 
1181 	atomic_set(&iwqp->refcnt, 1);
1182 	spin_lock_init(&iwqp->lock);
1183 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
1184 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
1185 	rf->qp_table[qp_num] = iwqp;
1186 
1187 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1188 		if (dev->ws_add(&iwdev->vsi, 0)) {
1189 			irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
1190 			err_code = -EINVAL;
1191 			goto error;
1192 		}
1193 
1194 		irdma_qp_add_qos(&iwqp->sc_qp);
1195 		spin_lock_irqsave(&iwpd->udqp_list_lock, flags);
1196 		if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD)
1197 			list_add_tail(&iwqp->ud_list_elem, &iwpd->udqp_list);
1198 		spin_unlock_irqrestore(&iwpd->udqp_list_lock, flags);
1199 	}
1200 
1201 	if (udata) {
1202 		/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
1203 		if (udata->outlen < sizeof(uresp)) {
1204 			uresp.lsmm = 1;
1205 			uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
1206 		} else {
1207 			if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
1208 				uresp.lsmm = 1;
1209 		}
1210 		uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
1211 		uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
1212 		uresp.qp_id = qp_num;
1213 		uresp.qp_caps = qp->qp_uk.qp_caps;
1214 
1215 		err_code = ib_copy_to_udata(udata, &uresp,
1216 					    min(sizeof(uresp), udata->outlen));
1217 		if (err_code) {
1218 			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
1219 			kc_irdma_destroy_qp(&iwqp->ibqp, udata);
1220 			return ERR_PTR(err_code);
1221 		}
1222 	}
1223 
1224 	init_completion(&iwqp->free_qp);
1225 	return &iwqp->ibqp;
1226 
1227 error:
1228 	irdma_free_qp_rsrc(iwqp);
1229 
1230 	return ERR_PTR(err_code);
1231 }
1232 
1233 /**
1234  * irdma_destroy_qp - destroy qp
1235  * @ibqp: qp's ib pointer, also used to get to the device's qp address
1236  * @udata: user data
1237  */
1238 #if __FreeBSD_version >= 1400026
1239 int
1240 irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1241 #else
1242 int
1243 irdma_destroy_qp(struct ib_qp *ibqp)
1244 #endif
1245 {
1246 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1247 	struct irdma_device *iwdev = iwqp->iwdev;
1248 	unsigned long flags;
1249 
1250 	if (iwqp->sc_qp.qp_uk.destroy_pending)
1251 		goto free_rsrc;
1252 	iwqp->sc_qp.qp_uk.destroy_pending = true;
1253 
1254 	spin_lock_irqsave(&iwqp->iwpd->udqp_list_lock, flags);
1255 	if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD)
1256 		list_del(&iwqp->ud_list_elem);
1257 	spin_unlock_irqrestore(&iwqp->iwpd->udqp_list_lock, flags);
1258 
1259 	if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
1260 		irdma_modify_qp_to_err(&iwqp->sc_qp);
1261 
1262 	irdma_qp_rem_ref(&iwqp->ibqp);
1263 	wait_for_completion(&iwqp->free_qp);
1264 	irdma_free_lsmm_rsrc(iwqp);
1265 	if (!iwdev->rf->reset && irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp))
1266 		return (iwdev->rf->rdma_ver <= IRDMA_GEN_2 && !iwqp->user_mode) ? 0 : -ENOTRECOVERABLE;
1267 free_rsrc:
1268 	if (!iwqp->user_mode) {
1269 		if (iwqp->iwscq) {
1270 			irdma_clean_cqes(iwqp, iwqp->iwscq);
1271 			if (iwqp->iwrcq != iwqp->iwscq)
1272 				irdma_clean_cqes(iwqp, iwqp->iwrcq);
1273 		}
1274 	}
1275 	irdma_remove_push_mmap_entries(iwqp);
1276 	irdma_free_qp_rsrc(iwqp);
1277 
1278 	return 0;
1279 }
1280 
1281 /**
1282  * irdma_create_cq - create cq
1283  * @ibcq: CQ allocated
1284  * @attr: attributes for cq
1285  * @udata: user data
1286  */
1287 #if __FreeBSD_version >= 1400026
1288 int
1289 irdma_create_cq(struct ib_cq *ibcq,
1290 		const struct ib_cq_init_attr *attr,
1291 		struct ib_udata *udata)
1292 #else
1293 struct ib_cq *
1294 irdma_create_cq(struct ib_device *ibdev,
1295 		const struct ib_cq_init_attr *attr,
1296 		struct ib_ucontext *context,
1297 		struct ib_udata *udata)
1298 #endif
1299 {
1300 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
1301 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
1302 #if __FreeBSD_version >= 1400026
1303 	struct ib_device *ibdev = ibcq->device;
1304 #endif
1305 	struct irdma_device *iwdev = to_iwdev(ibdev);
1306 	struct irdma_pci_f *rf = iwdev->rf;
1307 #if __FreeBSD_version >= 1400026
1308 	struct irdma_cq *iwcq = to_iwcq(ibcq);
1309 #else
1310 	struct irdma_cq *iwcq;
1311 #endif
1312 	u32 cq_num = 0;
1313 	struct irdma_sc_cq *cq;
1314 	struct irdma_sc_dev *dev = &rf->sc_dev;
1315 	struct irdma_cq_init_info info = {0};
1316 	int status;
1317 	struct irdma_cqp_request *cqp_request;
1318 	struct cqp_cmds_info *cqp_info;
1319 	struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1320 	unsigned long flags;
1321 	int err_code;
1322 	int entries = attr->cqe;
1323 	bool cqe_64byte_ena;
1324 
1325 #if __FreeBSD_version >= 1400026
1326 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1327 	if (err_code)
1328 		return err_code;
1329 
1330 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
1331 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
1332 		return -EINVAL;
1333 #else
1334 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1335 	if (err_code)
1336 		return ERR_PTR(err_code);
1337 
1338 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
1339 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
1340 		return ERR_PTR(-EINVAL);
1341 
1342 	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
1343 	if (!iwcq)
1344 		return ERR_PTR(-ENOMEM);
1345 #endif
1346 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
1347 				    &rf->next_cq);
1348 	if (err_code)
1349 #if __FreeBSD_version >= 1400026
1350 		return err_code;
1351 #else
1352 		goto error;
1353 #endif
1354 	cq = &iwcq->sc_cq;
1355 	cq->back_cq = iwcq;
1356 	atomic_set(&iwcq->refcnt, 1);
1357 	spin_lock_init(&iwcq->lock);
1358 	INIT_LIST_HEAD(&iwcq->resize_list);
1359 	INIT_LIST_HEAD(&iwcq->cmpl_generated);
1360 	info.dev = dev;
1361 	ukinfo->cq_size = max(entries, 4);
1362 	ukinfo->cq_id = cq_num;
1363 	cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? true : false;
1364 	ukinfo->avoid_mem_cflct = cqe_64byte_ena;
1365 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1366 	atomic_set(&iwcq->armed, 0);
1367 	if (attr->comp_vector < rf->ceqs_count)
1368 		info.ceq_id = attr->comp_vector;
1369 	info.ceq_id_valid = true;
1370 	info.ceqe_mask = 1;
1371 	info.type = IRDMA_CQ_TYPE_IWARP;
1372 	info.vsi = &iwdev->vsi;
1373 
1374 	if (udata) {
1375 		struct irdma_ucontext *ucontext;
1376 		struct irdma_create_cq_req req = {0};
1377 		struct irdma_cq_mr *cqmr;
1378 		struct irdma_pbl *iwpbl;
1379 		struct irdma_pbl *iwpbl_shadow;
1380 		struct irdma_cq_mr *cqmr_shadow;
1381 
1382 		iwcq->user_mode = true;
1383 #if __FreeBSD_version >= 1400026
1384 		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1385 #else
1386 		ucontext = to_ucontext(context);
1387 #endif
1388 
1389 		if (ib_copy_from_udata(&req, udata,
1390 				       min(sizeof(req), udata->inlen))) {
1391 			err_code = -EFAULT;
1392 			goto cq_free_rsrc;
1393 		}
1394 
1395 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1396 		iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
1397 				      &ucontext->cq_reg_mem_list);
1398 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1399 		if (!iwpbl) {
1400 			err_code = -EPROTO;
1401 			goto cq_free_rsrc;
1402 		}
1403 		iwcq->iwpbl = iwpbl;
1404 		iwcq->cq_mem_size = 0;
1405 		cqmr = &iwpbl->cq_mr;
1406 
1407 		if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1408 		    IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
1409 			spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1410 			iwpbl_shadow = irdma_get_pbl((unsigned long)req.user_shadow_area,
1411 						     &ucontext->cq_reg_mem_list);
1412 			spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1413 
1414 			if (!iwpbl_shadow) {
1415 				err_code = -EPROTO;
1416 				goto cq_free_rsrc;
1417 			}
1418 			iwcq->iwpbl_shadow = iwpbl_shadow;
1419 			cqmr_shadow = &iwpbl_shadow->cq_mr;
1420 			info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
1421 			cqmr->split = true;
1422 		} else {
1423 			info.shadow_area_pa = cqmr->shadow;
1424 		}
1425 		if (iwpbl->pbl_allocated) {
1426 			info.virtual_map = true;
1427 			info.pbl_chunk_size = 1;
1428 			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
1429 		} else {
1430 			info.cq_base_pa = cqmr->cq_pbl.addr;
1431 		}
1432 	} else {
1433 		/* Kmode allocations */
1434 		int rsize;
1435 
1436 		if (entries < 1 || entries > rf->max_cqe) {
1437 			err_code = -EINVAL;
1438 			goto cq_free_rsrc;
1439 		}
1440 
1441 		entries++;
1442 		if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1443 			entries *= 2;
1444 		ukinfo->cq_size = entries;
1445 
1446 		if (cqe_64byte_ena)
1447 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
1448 		else
1449 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
1450 		iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE);
1451 		iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem,
1452 						       iwcq->kmem.size, IRDMA_HW_PAGE_SIZE);
1453 		if (!iwcq->kmem.va) {
1454 			err_code = -ENOMEM;
1455 			goto cq_free_rsrc;
1456 		}
1457 
1458 		iwcq->kmem_shadow.size = IRDMA_SHADOW_AREA_SIZE << 3;
1459 		iwcq->kmem_shadow.va = irdma_allocate_dma_mem(dev->hw,
1460 							      &iwcq->kmem_shadow,
1461 							      iwcq->kmem_shadow.size,
1462 							      64);
1463 
1464 		if (!iwcq->kmem_shadow.va) {
1465 			err_code = -ENOMEM;
1466 			goto cq_free_rsrc;
1467 		}
1468 		info.shadow_area_pa = iwcq->kmem_shadow.pa;
1469 		ukinfo->shadow_area = iwcq->kmem_shadow.va;
1470 		ukinfo->cq_base = iwcq->kmem.va;
1471 		info.cq_base_pa = iwcq->kmem.pa;
1472 	}
1473 
1474 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1475 		info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
1476 						 (u32)IRDMA_MAX_CQ_READ_THRESH);
1477 	if (irdma_sc_cq_init(cq, &info)) {
1478 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "init cq fail\n");
1479 		err_code = -EPROTO;
1480 		goto cq_free_rsrc;
1481 	}
1482 
1483 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1484 	if (!cqp_request) {
1485 		err_code = -ENOMEM;
1486 		goto cq_free_rsrc;
1487 	}
1488 	cqp_info = &cqp_request->info;
1489 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
1490 	cqp_info->post_sq = 1;
1491 	cqp_info->in.u.cq_create.cq = cq;
1492 	cqp_info->in.u.cq_create.check_overflow = true;
1493 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1494 	status = irdma_handle_cqp_op(rf, cqp_request);
1495 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1496 	if (status) {
1497 		err_code = -ENOMEM;
1498 		goto cq_free_rsrc;
1499 	}
1500 
1501 	if (udata) {
1502 		struct irdma_create_cq_resp resp = {0};
1503 
1504 		resp.cq_id = info.cq_uk_init_info.cq_id;
1505 		resp.cq_size = info.cq_uk_init_info.cq_size;
1506 		if (ib_copy_to_udata(udata, &resp,
1507 				     min(sizeof(resp), udata->outlen))) {
1508 			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy to user data\n");
1509 			err_code = -EPROTO;
1510 			goto cq_destroy;
1511 		}
1512 	}
1513 
1514 	rf->cq_table[cq_num] = iwcq;
1515 	init_completion(&iwcq->free_cq);
1516 
1517 #if __FreeBSD_version >= 1400026
1518 	return 0;
1519 #else
1520 	return &iwcq->ibcq;
1521 #endif
1522 cq_destroy:
1523 	irdma_cq_wq_destroy(rf, cq);
1524 cq_free_rsrc:
1525 	irdma_cq_free_rsrc(rf, iwcq);
1526 #if __FreeBSD_version >= 1400026
1527 	return err_code;
1528 #else
1529 error:
1530 	kfree(iwcq);
1531 	return ERR_PTR(err_code);
1532 #endif
1533 }
1534 
1535 /**
1536  * irdma_copy_user_pgaddrs - copy user page addresses into the local pble list
1537  * @iwmr: iwmr for IB's user page addresses
1538  * @pbl: pble pointer to save 1 level or 0 level pble
1539  * @level: indicates level 0, 1 or 2
1540  */
1541 
1542 void
1543 irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
1544 			enum irdma_pble_level level)
1545 {
1546 	struct ib_umem *region = iwmr->region;
1547 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1548 	int chunk_pages, entry, i;
1549 	struct scatterlist *sg;
1550 	u64 pg_addr = 0;
1551 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1552 	struct irdma_pble_info *pinfo;
1553 	u32 idx = 0;
1554 	u32 pbl_cnt = 0;
1555 
1556 	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
1557 	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
1558 		chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size);
1559 		if (iwmr->type == IRDMA_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page)
1560 			iwpbl->qp_mr.sq_page = sg_page(sg);
1561 		for (i = 0; i < chunk_pages; i++) {
1562 			pg_addr = sg_dma_address(sg) + (i * iwmr->page_size);
1563 			if ((entry + i) == 0)
1564 				*pbl = pg_addr & iwmr->page_msk;
1565 			else if (!(pg_addr & ~iwmr->page_msk))
1566 				*pbl = pg_addr;
1567 			else
1568 				continue;
1569 			if (++pbl_cnt == palloc->total_cnt)
1570 				break;
1571 			pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
1572 		}
1573 	}
1574 }
1575 
1576 /**
1577  * irdma_destroy_ah - Destroy address handle
1578  * @ibah: pointer to address handle
1579  * @ah_flags: destroy flags
1580  */
1581 
1582 #if __FreeBSD_version >= 1400026
1583 void
1584 irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
1585 {
1586 	struct irdma_device *iwdev = to_iwdev(ibah->device);
1587 	struct irdma_ah *ah = to_iwah(ibah);
1588 
1589 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
1590 			false, NULL, ah);
1591 
1592 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
1593 			ah->sc_ah.ah_info.ah_idx);
1594 }
1595 #endif
1596 
1597 #if __FreeBSD_version < 1400026
1598 int
1599 irdma_destroy_ah(struct ib_ah *ibah)
1600 {
1601 	struct irdma_device *iwdev = to_iwdev(ibah->device);
1602 	struct irdma_ah *ah = to_iwah(ibah);
1603 
1604 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
1605 			false, NULL, ah);
1606 
1607 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
1608 			ah->sc_ah.ah_info.ah_idx);
1609 
1610 	kfree(ah);
1611 	return 0;
1612 }
1613 #endif
1614 
1615 #if __FreeBSD_version >= 1400026
1616 int
1617 irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
1618 #else
1619 int
1620 irdma_dereg_mr(struct ib_mr *ib_mr)
1621 #endif
1622 {
1623 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
1624 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
1625 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1626 	int ret;
1627 
1628 	if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
1629 		if (iwmr->region) {
1630 			struct irdma_ucontext *ucontext;
1631 #if __FreeBSD_version >= 1400026
1632 
1633 			ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1634 
1635 #else
1636 			struct ib_pd *ibpd = ib_mr->pd;
1637 
1638 			ucontext = to_ucontext(ibpd->uobject->context);
1639 #endif
1640 			irdma_del_memlist(iwmr, ucontext);
1641 		}
1642 		goto done;
1643 	}
1644 
1645 	ret = irdma_hwdereg_mr(ib_mr);
1646 	if (ret)
1647 		return ret;
1648 
1649 	irdma_free_stag(iwdev, iwmr->stag);
1650 done:
1651 	if (iwpbl->pbl_allocated)
1652 		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
1653 
1654 	if (iwmr->region)
1655 		ib_umem_release(iwmr->region);
1656 
1657 	kfree(iwmr);
1658 
1659 	return 0;
1660 }
1661 
/**
 * irdma_rereg_user_mr - Re-Register a user memory region
 * @ib_mr: ib mem to access iwarp mr pointer
 * @flags: bit mask to indicate which of the attributes of the MR are modified
 * @start: virtual start address
 * @len: length of mr
 * @virt: virtual address
 * @new_access: bit mask of new access flags
 * @new_pd: ptr of pd
 * @udata: user data
 */
1667 int
1668 irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len,
1669 		    u64 virt, int new_access, struct ib_pd *new_pd,
1670 		    struct ib_udata *udata)
1671 {
1672 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
1673 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
1674 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1675 	int ret;
1676 
1677 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
1678 		return -EINVAL;
1679 
1680 	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
1681 		return -EOPNOTSUPP;
1682 
1683 	ret = irdma_hwdereg_mr(ib_mr);
1684 	if (ret)
1685 		return ret;
1686 
1687 	if (flags & IB_MR_REREG_ACCESS)
1688 		iwmr->access = new_access;
1689 
1690 	if (flags & IB_MR_REREG_PD) {
1691 		iwmr->ibmr.pd = new_pd;
1692 		iwmr->ibmr.device = new_pd->device;
1693 	}
1694 
1695 	if (flags & IB_MR_REREG_TRANS) {
1696 		if (iwpbl->pbl_allocated) {
1697 			irdma_free_pble(iwdev->rf->pble_rsrc,
1698 					&iwpbl->pble_alloc);
1699 			iwpbl->pbl_allocated = false;
1700 		}
1701 		if (iwmr->region) {
1702 			ib_umem_release(iwmr->region);
1703 			iwmr->region = NULL;
1704 		}
1705 
1706 		ib_mr = irdma_rereg_mr_trans(iwmr, start, len, virt, udata);
1707 		if (IS_ERR(ib_mr))
1708 			return PTR_ERR(ib_mr);
1709 
1710 	} else {
1711 		ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
1712 		if (ret)
1713 			return ret;
1714 	}
1715 
1716 	return 0;
1717 }
1718 
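/**
 * kc_irdma_set_roce_cm_info - set RoCE connection info from the AH attribute
 * @iwqp: QP being connected
 * @attr: QP attributes carrying the AH attribute to resolve
 * @vlan_id: returns the VLAN id of the source GID's net device, if any
 *
 * Looks up the cached source GID, records the net device MAC and VLAN id,
 * and stores the network type and source IP address in the QP's AV.
 */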
1719 int
1720 kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr,
1721 			  u16 *vlan_id)
1722 {
1723 	int ret;
1724 	union ib_gid sgid;
1725 	struct ib_gid_attr sgid_attr;
1726 	struct irdma_av *av = &iwqp->roce_ah.av;
1727 
1728 	ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num,
1729 				attr->ah_attr.grh.sgid_index, &sgid,
1730 				&sgid_attr);
1731 	if (ret)
1732 		return ret;
1733 
1734 	if (sgid_attr.ndev) {
1735 		*vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
1736 		ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, IF_LLADDR(sgid_attr.ndev));
1737 	}
1738 
1739 	av->net_type = kc_rdma_gid_attr_network_type(sgid_attr,
1740 						     sgid_attr.gid_type,
1741 						     &sgid);
1742 	rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid);
1743 	dev_put(sgid_attr.ndev);
1744 	iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri;
1745 
1746 	return 0;
1747 }
1748 
1749 #if __FreeBSD_version >= 1400026
1750 /**
1751  * irdma_destroy_cq - destroy cq
1752  * @ib_cq: cq pointer
1753  * @udata: user data
1754  */
1755 void
1756 irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1757 {
1758 	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1759 	struct irdma_cq *iwcq = to_iwcq(ib_cq);
1760 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1761 	struct irdma_sc_dev *dev = cq->dev;
1762 	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1763 	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1764 	unsigned long flags;
1765 
1766 	spin_lock_irqsave(&iwcq->lock, flags);
1767 	if (!list_empty(&iwcq->cmpl_generated))
1768 		irdma_remove_cmpls_list(iwcq);
1769 	if (!list_empty(&iwcq->resize_list))
1770 		irdma_process_resize_list(iwcq, iwdev, NULL);
1771 	spin_unlock_irqrestore(&iwcq->lock, flags);
1772 
1773 	irdma_cq_rem_ref(ib_cq);
1774 	wait_for_completion(&iwcq->free_cq);
1775 
1776 	irdma_cq_wq_destroy(iwdev->rf, cq);
1777 
1778 	spin_lock_irqsave(&iwceq->ce_lock, flags);
1779 	irdma_sc_cleanup_ceqes(cq, ceq);
1780 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1781 	irdma_cq_free_rsrc(iwdev->rf, iwcq);
1782 }
1783 
1784 #endif
1785 #if __FreeBSD_version < 1400026
1786 /**
1787  * irdma_destroy_cq - destroy cq
1788  * @ib_cq: cq pointer
1789  */
1790 int
1791 irdma_destroy_cq(struct ib_cq *ib_cq)
1792 {
1793 	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1794 	struct irdma_cq *iwcq = to_iwcq(ib_cq);
1795 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1796 	struct irdma_sc_dev *dev = cq->dev;
1797 	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1798 	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1799 	unsigned long flags;
1800 
1801 	spin_lock_irqsave(&iwcq->lock, flags);
1802 	if (!list_empty(&iwcq->cmpl_generated))
1803 		irdma_remove_cmpls_list(iwcq);
1804 	if (!list_empty(&iwcq->resize_list))
1805 		irdma_process_resize_list(iwcq, iwdev, NULL);
1806 	spin_unlock_irqrestore(&iwcq->lock, flags);
1807 
1808 	irdma_cq_rem_ref(ib_cq);
1809 	wait_for_completion(&iwcq->free_cq);
1810 
1811 	irdma_cq_wq_destroy(iwdev->rf, cq);
1812 
1813 	spin_lock_irqsave(&iwceq->ce_lock, flags);
1814 	irdma_sc_cleanup_ceqes(cq, ceq);
1815 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1816 
1817 	irdma_cq_free_rsrc(iwdev->rf, iwcq);
1818 	kfree(iwcq);
1819 
1820 	return 0;
1821 }
1822 
1823 #endif
1824 /**
1825  * irdma_alloc_mw - Allocate memory window
1826  * @pd: Protection domain
1827  * @type: Window type
1828  * @udata: user data pointer
1829  */
1830 struct ib_mw *
1831 irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
1832 	       struct ib_udata *udata)
1833 {
1834 	struct irdma_device *iwdev = to_iwdev(pd->device);
1835 	struct irdma_mr *iwmr;
1836 	int err_code;
1837 	u32 stag;
1838 
1839 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1840 	if (!iwmr)
1841 		return ERR_PTR(-ENOMEM);
1842 
1843 	stag = irdma_create_stag(iwdev);
1844 	if (!stag) {
1845 		kfree(iwmr);
1846 		return ERR_PTR(-ENOMEM);
1847 	}
1848 
1849 	iwmr->stag = stag;
1850 	iwmr->ibmw.rkey = stag;
1851 	iwmr->ibmw.pd = pd;
1852 	iwmr->ibmw.type = type;
1853 	iwmr->ibmw.device = pd->device;
1854 
1855 	err_code = irdma_hw_alloc_mw(iwdev, iwmr);
1856 	if (err_code) {
1857 		irdma_free_stag(iwdev, stag);
1858 		kfree(iwmr);
1859 		return ERR_PTR(err_code);
1860 	}
1861 
1862 	return &iwmr->ibmw;
1863 }
1864 
1865 /**
1866  * kc_set_loc_seq_num_mss - Set local seq number and mss
1867  * @cm_node: cm node info
1868  */
1869 void
1870 kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node)
1871 {
1872 	struct timespec ts;
1873 
1874 	getnanotime(&ts);
1875 	cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
1876 	if (cm_node->iwdev->vsi.mtu > 1500 &&
1877 	    2 * cm_node->iwdev->vsi.mtu > cm_node->iwdev->rcv_wnd)
1878 		cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
1879 		    (1500 - IRDMA_MTU_TO_MSS_IPV4) :
1880 		    (1500 - IRDMA_MTU_TO_MSS_IPV6);
1881 	else
1882 		cm_node->tcp_cntxt.mss = (cm_node->ipv4) ?
1883 		    (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4) :
1884 		    (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6);
1885 }
1886 
1887 #if __FreeBSD_version < 1400026
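/**
 * struct irdma_vma_data - tracking entry for a user VMA
 * @list: entry in the ucontext vma_list
 * @vma: user VMA registered through the mmap path
 * @vma_list_mutex: protects the vma_list
 */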
1888 struct irdma_vma_data {
1889 	struct list_head list;
1890 	struct vm_area_struct *vma;
1891 	struct mutex *vma_list_mutex;	/* protect the vma_list */
1892 };
1893 
1894 /**
1895  * irdma_vma_open - Clear vm_ops when a VMA is opened
1896  * @vma: User VMA
1897  */
1898 static void
1899 irdma_vma_open(struct vm_area_struct *vma)
1900 {
1901 	vma->vm_ops = NULL;
1902 }
1903 
1904 /**
1905  * irdma_vma_close - Remove vma data from vma list
1906  * @vma: User VMA
1907  */
1908 static void
1909 irdma_vma_close(struct vm_area_struct *vma)
1910 {
1911 	struct irdma_vma_data *vma_data;
1912 
1913 	vma_data = vma->vm_private_data;
1914 	vma->vm_private_data = NULL;
1915 	vma_data->vma = NULL;
1916 	mutex_lock(vma_data->vma_list_mutex);
1917 	list_del(&vma_data->list);
1918 	mutex_unlock(vma_data->vma_list_mutex);
1919 	kfree(vma_data);
1920 }
1921 
1922 static const struct vm_operations_struct irdma_vm_ops = {
1923 	.open = irdma_vma_open,
1924 	.close = irdma_vma_close
1925 };
1926 
1927 /**
1928  * irdma_set_vma_data - Save vma data in context list
1929  * @vma: User VMA
1930  * @context: ib user context
1931  */
1932 static int
1933 irdma_set_vma_data(struct vm_area_struct *vma,
1934 		   struct irdma_ucontext *context)
1935 {
1936 	struct list_head *vma_head = &context->vma_list;
1937 	struct irdma_vma_data *vma_entry;
1938 
1939 	vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL);
1940 	if (!vma_entry)
1941 		return -ENOMEM;
1942 
1943 	vma->vm_private_data = vma_entry;
1944 	vma->vm_ops = &irdma_vm_ops;
1945 
1946 	vma_entry->vma = vma;
1947 	vma_entry->vma_list_mutex = &context->vma_list_mutex;
1948 
1949 	mutex_lock(&context->vma_list_mutex);
1950 	list_add(&vma_entry->list, vma_head);
1951 	mutex_unlock(&context->vma_list_mutex);
1952 
1953 	return 0;
1954 }
1955 
1956 /**
1957  * irdma_disassociate_ucontext - Disassociate user context
1958  * @context: ib user context
1959  */
1960 void
1961 irdma_disassociate_ucontext(struct ib_ucontext *context)
1962 {
1963 	struct irdma_ucontext *ucontext = to_ucontext(context);
1964 
1965 	struct irdma_vma_data *vma_data, *n;
1966 	struct vm_area_struct *vma;
1967 
1968 	mutex_lock(&ucontext->vma_list_mutex);
1969 	list_for_each_entry_safe(vma_data, n, &ucontext->vma_list, list) {
1970 		vma = vma_data->vma;
1971 		zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
1972 
1973 		vma->vm_ops = NULL;
1974 		list_del(&vma_data->list);
1975 		kfree(vma_data);
1976 	}
1977 	mutex_unlock(&ucontext->vma_list_mutex);
1978 }
1979 
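/**
 * rdma_user_mmap_io - map device I/O memory into a user VMA
 * @context: ib user context
 * @vma: user VMA to populate
 * @pfn: page frame number of the I/O region
 * @size: size of the mapping in bytes
 * @prot: page protection flags
 *
 * Compat implementation for older stacks: remaps the I/O pages and
 * records the VMA so irdma_disassociate_ucontext() can zap it later.
 */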
1980 int
1981 rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma,
1982 		  unsigned long pfn, unsigned long size, pgprot_t prot)
1983 {
1984 	if (io_remap_pfn_range(vma,
1985 			       vma->vm_start,
1986 			       pfn,
1987 			       size,
1988 			       prot))
1989 		return -EAGAIN;
1990 
1991 	return irdma_set_vma_data(vma, to_ucontext(context));
1992 }
1993 #else
1994 /**
1995  * irdma_disassociate_ucontext - Disassociate user context
1996  * @context: ib user context
1997  */
1998 void
1999 irdma_disassociate_ucontext(struct ib_ucontext *context)
2000 {
2001 }
2002 #endif
2003 
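/**
 * ib_device_get_by_netdev - look up the ib device bound to a net device
 * @netdev: net device to match
 * @driver_id: driver id (unused)
 *
 * Walks the global irdma handler list under irdma_handler_lock and
 * returns the matching ib device, or NULL if none is found.
 */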
2004 struct ib_device *
2005 ib_device_get_by_netdev(struct ifnet *netdev, int driver_id)
2006 {
2007 	struct irdma_device *iwdev;
2008 	struct irdma_handler *hdl;
2009 	unsigned long flags;
2010 
2011 	spin_lock_irqsave(&irdma_handler_lock, flags);
2012 	list_for_each_entry(hdl, &irdma_handlers, list) {
2013 		iwdev = hdl->iwdev;
2014 		if (netdev == iwdev->netdev) {
2015 			spin_unlock_irqrestore(&irdma_handler_lock,
2016 					       flags);
2017 			return &iwdev->ibdev;
2018 		}
2019 	}
2020 	spin_unlock_irqrestore(&irdma_handler_lock, flags);
2021 
2022 	return NULL;
2023 }
2024 
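/**
 * ib_unregister_device_put - unregister an ib device
 * @device: ib device pointer
 *
 * Compat wrapper that simply calls ib_unregister_device().
 */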
2025 void
2026 ib_unregister_device_put(struct ib_device *device)
2027 {
2028 	ib_unregister_device(device);
2029 }
2030 
2031 /**
2032  * irdma_query_gid_roce - Query port GID for RoCE
2033  * @ibdev: device pointer from stack
2034  * @port: port number
2035  * @index: Entry index
2036  * @gid: Global ID
2037  */
2038 int
2039 irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
2040 		     union ib_gid *gid)
2041 {
2042 	int ret;
2043 
2044 	ret = rdma_query_gid(ibdev, port, index, gid);
2045 	if (ret == -EAGAIN) {
2046 		memcpy(gid, &zgid, sizeof(*gid));
2047 		return 0;
2048 	}
2049 
2050 	return ret;
2051 }
2052 
2053 /**
2054  * irdma_modify_port - modify port attributes
2055  * @ibdev: device pointer from stack
2056  * @port: port number
2057  * @mask: Property mask
2058  * @props: port attributes to modify
2059  */
2060 int
2061 irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
2062 		  struct ib_port_modify *props)
2063 {
2064 	if (port > 1)
2065 		return -EINVAL;
2066 
2067 	return 0;
2068 }
2069 
2070 /**
2071  * irdma_query_pkey - Query partition key
2072  * @ibdev: device pointer from stack
2073  * @port: port number
2074  * @index: index of pkey
2075  * @pkey: pointer to store the pkey
2076  */
2077 int
2078 irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
2079 		 u16 *pkey)
2080 {
2081 	if (index >= IRDMA_PKEY_TBL_SZ)
2082 		return -EINVAL;
2083 
2084 	*pkey = IRDMA_DEFAULT_PKEY;
2085 	return 0;
2086 }
2087 
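/**
 * irdma_roce_port_immutable - get immutable RoCEv2 port attributes
 * @ibdev: device pointer from stack
 * @port_num: port number
 * @immutable: returning immutable port attributes
 */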
2088 int
2089 irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
2090 			  struct ib_port_immutable *immutable)
2091 {
2092 	struct ib_port_attr attr;
2093 	int err;
2094 
2095 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2096 	err = ib_query_port(ibdev, port_num, &attr);
2097 	if (err)
2098 		return err;
2099 
2100 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2101 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
2102 	immutable->gid_tbl_len = attr.gid_tbl_len;
2103 
2104 	return 0;
2105 }
2106 
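/**
 * irdma_iw_port_immutable - get immutable iWARP port attributes
 * @ibdev: device pointer from stack
 * @port_num: port number
 * @immutable: returning immutable port attributes
 */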
2107 int
2108 irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
2109 			struct ib_port_immutable *immutable)
2110 {
2111 	struct ib_port_attr attr;
2112 	int err;
2113 
2114 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
2115 	err = ib_query_port(ibdev, port_num, &attr);
2116 	if (err)
2117 		return err;
2118 	immutable->gid_tbl_len = 1;
2119 
2120 	return 0;
2121 }
2122 
2123 /**
2124  * irdma_query_port - get port attributes
2125  * @ibdev: device pointer from stack
2126  * @port: port number for query
2127  * @props: returning port attributes
2128  */
2129 int
2130 irdma_query_port(struct ib_device *ibdev, u8 port,
2131 		 struct ib_port_attr *props)
2132 {
2133 	struct irdma_device *iwdev = to_iwdev(ibdev);
2134 	struct ifnet *netdev = iwdev->netdev;
2135 
2136 	/* no need to zero out props here, it is done by the caller */
2137 
2138 	props->max_mtu = IB_MTU_4096;
2139 	props->active_mtu = ib_mtu_int_to_enum(netdev->if_mtu);
2140 	props->lid = 1;
2141 	props->lmc = 0;
2142 	props->sm_lid = 0;
2143 	props->sm_sl = 0;
2144 	if ((netdev->if_link_state == LINK_STATE_UP) && (netdev->if_drv_flags & IFF_DRV_RUNNING)) {
2145 		props->state = IB_PORT_ACTIVE;
2146 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
2147 	} else {
2148 		props->state = IB_PORT_DOWN;
2149 		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
2150 	}
2151 	ib_get_eth_speed(ibdev, port, &props->active_speed, &props->active_width);
2152 
2153 	if (rdma_protocol_roce(ibdev, 1)) {
2154 		props->gid_tbl_len = 32;
2155 		kc_set_props_ip_gid_caps(props);
2156 		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
2157 	} else {
2158 		props->gid_tbl_len = 1;
2159 	}
2160 	props->qkey_viol_cntr = 0;
2161 	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
2162 	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
2163 
2164 	return 0;
2165 }
2166 
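/*
 * Counter names exported via rdma_hw_stats; the array indices must match
 * the field layout of struct irdma_dev_hw_stats, which irdma_get_hw_stats()
 * copies directly into the stats buffer.
 */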
2167 static const char *const irdma_hw_stat_names[] = {
2168 	/* gen1 - 32-bit */
2169 	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
2170 	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
2171 	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
2172 	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
2173 	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
2174 	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
2175 	[IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
2176 	/* gen1 - 64-bit */
2177 	[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
2178 	[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
2179 	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
2180 	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
2181 	[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
2182 	[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
2183 	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
2184 	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
2185 	[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
2186 	[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
2187 	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
2188 	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
2189 	[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
2190 	[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
2191 	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
2192 	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
2193 	[IRDMA_HW_STAT_INDEX_RDMARXRDS] = "InRdmaReads",
2194 	[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "InRdmaSends",
2195 	[IRDMA_HW_STAT_INDEX_RDMARXWRS] = "InRdmaWrites",
2196 	[IRDMA_HW_STAT_INDEX_RDMATXRDS] = "OutRdmaReads",
2197 	[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "OutRdmaSends",
2198 	[IRDMA_HW_STAT_INDEX_RDMATXWRS] = "OutRdmaWrites",
2199 	[IRDMA_HW_STAT_INDEX_RDMAVBND] = "RdmaBnd",
2200 	[IRDMA_HW_STAT_INDEX_RDMAVINV] = "RdmaInv",
2201 
2202 	/* gen2 - 32-bit */
2203 	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
2204 	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
2205 	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
2206 	/* gen2 - 64-bit */
2207 	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
2208 	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
2209 	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
2210 	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
2211 	[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
2212 	[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
2213 	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
2214 	[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "RetransSegs",
2215 	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "InOptErrors",
2216 	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "InProtoErrors",
2217 	[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "InSegs",
2218 	[IRDMA_HW_STAT_INDEX_TCPTXSEG] = "OutSegs",
2219 };
2220 
2221 /**
2222  * irdma_alloc_hw_stats - Allocate a hw stats structure
2223  * @ibdev: device pointer from stack
2224  * @port_num: port number
2225  */
2226 struct rdma_hw_stats *
2227 irdma_alloc_hw_stats(struct ib_device *ibdev,
2228 		     u8 port_num)
2229 {
2230 	struct irdma_device *iwdev = to_iwdev(ibdev);
2231 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
2232 
2233 	int num_counters = dev->hw_attrs.max_stat_idx;
2234 	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
2235 
2236 	return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
2237 					  lifespan);
2238 }
2239 
2240 /**
2241  * irdma_get_hw_stats - Populates the rdma_hw_stats structure
2242  * @ibdev: device pointer from stack
2243  * @stats: stats pointer from stack
2244  * @port_num: port number
2245  * @index: which hw counter the stack is requesting we update
2246  */
2247 int
2248 irdma_get_hw_stats(struct ib_device *ibdev,
2249 		   struct rdma_hw_stats *stats, u8 port_num,
2250 		   int index)
2251 {
2252 	struct irdma_device *iwdev = to_iwdev(ibdev);
2253 	struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
2254 
2255 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
2256 		irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
2257 
2258 	memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
2259 
2260 	return stats->num_counters;
2261 }
2262 
2263 /**
2264  * irdma_query_gid - Query port GID
2265  * @ibdev: device pointer from stack
2266  * @port: port number
2267  * @index: Entry index
2268  * @gid: Global ID
2269  */
2270 int
2271 irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
2272 		union ib_gid *gid)
2273 {
2274 	struct irdma_device *iwdev = to_iwdev(ibdev);
2275 
2276 	memset(gid->raw, 0, sizeof(gid->raw));
2277 	ether_addr_copy(gid->raw, IF_LLADDR(iwdev->netdev));
2278 
2279 	return 0;
2280 }
2281 
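/**
 * irdma_get_link_layer - get the link layer for a port
 * @ibdev: device pointer from stack
 * @port_num: port number
 *
 * All irdma ports are Ethernet based.
 */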
2282 enum rdma_link_layer
2283 irdma_get_link_layer(struct ib_device *ibdev,
2284 		     u8 port_num)
2285 {
2286 	return IB_LINK_LAYER_ETHERNET;
2287 }
2288 
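/**
 * ib_mtu_int_to_enum - convert a byte MTU into an ib_mtu enum
 * @mtu: MTU in bytes
 *
 * Rounds down to the largest IB MTU that fits; for example an if_mtu of
 * 1500 maps to IB_MTU_1024 and 9000 maps to IB_MTU_4096.
 */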
2289 inline enum ib_mtu
2290 ib_mtu_int_to_enum(int mtu)
2291 {
2292 	if (mtu >= 4096)
2293 		return IB_MTU_4096;
2294 	else if (mtu >= 2048)
2295 		return IB_MTU_2048;
2296 	else if (mtu >= 1024)
2297 		return IB_MTU_1024;
2298 	else if (mtu >= 512)
2299 		return IB_MTU_512;
2300 	else
2301 		return IB_MTU_256;
2302 }
2303 
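/**
 * kc_set_roce_uverbs_cmd_mask - add RoCE specific uverbs commands
 * @iwdev: irdma device
 *
 * Extends the uverbs command mask with the AH and multicast commands
 * that are only available in RoCE mode.
 */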
2304 inline void
2305 kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev)
2306 {
2307 	iwdev->ibdev.uverbs_cmd_mask |=
2308 	    BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
2309 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
2310 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
2311 	    BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST);
2312 }
2313 
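/**
 * kc_set_rdma_uverbs_cmd_mask - set the base uverbs command masks
 * @iwdev: irdma device
 *
 * Initializes the uverbs command masks shared by iWARP and RoCE;
 * extended CQ create is only advertised on gen2 and newer hardware.
 */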
2314 inline void
2315 kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
2316 {
2317 	iwdev->ibdev.uverbs_cmd_mask =
2318 	    BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
2319 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
2320 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
2321 	    BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
2322 	    BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
2323 	    BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
2324 	    BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
2325 	    BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
2326 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2327 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
2328 	    BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) |
2329 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
2330 	    BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2331 	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
2332 	    BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
2333 	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
2334 	    BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
2335 	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
2336 	    BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
2337 	    BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
2338 	    BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
2339 	    BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
2340 	    BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
2341 	iwdev->ibdev.uverbs_ex_cmd_mask =
2342 	    BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) |
2343 	    BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
2344 
2345 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
2346 		iwdev->ibdev.uverbs_ex_cmd_mask |= BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);
2347 }
2348 
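/**
 * ib_get_eth_speed - derive IB speed and width from the net device
 * @ibdev: device pointer from stack
 * @port_num: port number
 * @speed: returning active speed
 * @width: returning active width
 *
 * Maps the net device baudrate to the closest IB speed/width pair, for
 * example 25 Gbps is reported as 1X EDR and anything faster than
 * 40 Gbps as 4X EDR.
 */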
2349 int
2350 ib_get_eth_speed(struct ib_device *ibdev, u32 port_num, u8 *speed, u8 *width)
2351 {
2352 	struct ifnet *netdev = ibdev->get_netdev(ibdev, port_num);
2353 	u32 netdev_speed;
2354 
2355 	if (!netdev)
2356 		return -ENODEV;
2357 
2358 	netdev_speed = netdev->if_baudrate;
2359 	dev_put(netdev);
2360 	if (netdev_speed <= SPEED_1000) {
2361 		*width = IB_WIDTH_1X;
2362 		*speed = IB_SPEED_SDR;
2363 	} else if (netdev_speed <= SPEED_10000) {
2364 		*width = IB_WIDTH_1X;
2365 		*speed = IB_SPEED_FDR10;
2366 	} else if (netdev_speed <= SPEED_20000) {
2367 		*width = IB_WIDTH_4X;
2368 		*speed = IB_SPEED_DDR;
2369 	} else if (netdev_speed <= SPEED_25000) {
2370 		*width = IB_WIDTH_1X;
2371 		*speed = IB_SPEED_EDR;
2372 	} else if (netdev_speed <= SPEED_40000) {
2373 		*width = IB_WIDTH_4X;
2374 		*speed = IB_SPEED_FDR10;
2375 	} else {
2376 		*width = IB_WIDTH_4X;
2377 		*speed = IB_SPEED_EDR;
2378 	}
2379 
2380 	return 0;
2381 }
2382