xref: /freebsd/sys/dev/irdma/irdma_verbs.c (revision af23369a6deaaeb612ab266eb88b8bb8d560c322)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2015 - 2022 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 /*$FreeBSD$*/
35 
36 #include "irdma_main.h"
37 
38 /**
39  * irdma_query_device - get device attributes
40  * @ibdev: device pointer from stack
41  * @props: returned device attributes
42  * @udata: user data
43  */
44 static int
45 irdma_query_device(struct ib_device *ibdev,
46 		   struct ib_device_attr *props,
47 		   struct ib_udata *udata)
48 {
49 	struct irdma_device *iwdev = to_iwdev(ibdev);
50 	struct irdma_pci_f *rf = iwdev->rf;
51 	struct pci_dev *pcidev = iwdev->rf->pcidev;
52 	struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
53 
54 	if (udata->inlen || udata->outlen)
55 		return -EINVAL;
56 
57 	memset(props, 0, sizeof(*props));
58 	ether_addr_copy((u8 *)&props->sys_image_guid, IF_LLADDR(iwdev->netdev));
59 	props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
60 	    irdma_fw_minor_ver(&rf->sc_dev);
61 	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
62 	    IB_DEVICE_MEM_MGT_EXTENSIONS;
63 	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
64 	props->vendor_id = pcidev->vendor;
65 	props->vendor_part_id = pcidev->device;
66 	props->hw_ver = pcidev->revision;
67 	props->page_size_cap = hw_attrs->page_size_cap;
68 	props->max_mr_size = hw_attrs->max_mr_size;
69 	props->max_qp = rf->max_qp - rf->used_qps;
70 	props->max_qp_wr = hw_attrs->max_qp_wr;
71 	set_max_sge(props, rf);
72 	props->max_cq = rf->max_cq - rf->used_cqs;
73 	props->max_cqe = rf->max_cqe - 1;
74 	props->max_mr = rf->max_mr - rf->used_mrs;
75 	props->max_mw = props->max_mr;
76 	props->max_pd = rf->max_pd - rf->used_pds;
77 	props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
78 	props->max_qp_rd_atom = hw_attrs->max_hw_ird;
79 	props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
80 	if (rdma_protocol_roce(ibdev, 1)) {
81 		props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
82 		props->max_pkeys = IRDMA_PKEY_TBL_SZ;
83 		props->max_ah = rf->max_ah;
84 		if (hw_attrs->uk_attrs.hw_rev == IRDMA_GEN_2) {
85 			props->max_mcast_grp = rf->max_mcg;
86 			props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
87 			props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
88 		}
89 	}
90 	props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
91 	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
92 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
93 
94 	return 0;
95 }
96 
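/**
 * irdma_mmap_legacy - mmap the hardware doorbell page for legacy users
 * @ucontext: user context of the caller
 * @vma: kernel info for the user memory map
 *
 * Legacy libi40iw maps the doorbell page at a fixed page offset of 0,
 * so only a single-page mapping at offset 0 is accepted here.
 */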
97 static int
98 irdma_mmap_legacy(struct irdma_ucontext *ucontext,
99 		  struct vm_area_struct *vma)
100 {
101 	u64 pfn;
102 
103 	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
104 		return -EINVAL;
105 
106 	vma->vm_private_data = ucontext;
107 	pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
108 	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
109 
110 #if __FreeBSD_version >= 1400026
111 	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
112 				 pgprot_noncached(vma->vm_page_prot), NULL);
113 #else
114 	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
115 				 pgprot_noncached(vma->vm_page_prot));
116 #endif
117 }
118 
119 #if __FreeBSD_version >= 1400026
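/**
 * irdma_mmap_free - release an irdma user mmap entry
 * @rdma_entry: rdma core mmap entry being freed
 */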
120 static void
121 irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
122 {
123 	struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
124 
125 	kfree(entry);
126 }
127 
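/**
 * irdma_user_mmap_entry_insert - register a BAR offset with the rdma core mmap framework
 * @ucontext: user context of the caller
 * @bar_offset: offset into BAR 0 to be mapped
 * @mmap_flag: caching attribute for the mapping (NC or WC)
 * @mmap_offset: returned byte offset to pass to mmap() from user space
 *
 * Returns the new rdma mmap entry on success, NULL on failure.
 */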
128 struct rdma_user_mmap_entry *
129 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
130 			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
131 {
132 	struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
133 	int ret;
134 
135 	if (!entry)
136 		return NULL;
137 
138 	entry->bar_offset = bar_offset;
139 	entry->mmap_flag = mmap_flag;
140 
141 	ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
142 					  &entry->rdma_entry, PAGE_SIZE);
143 	if (ret) {
144 		kfree(entry);
145 		return NULL;
146 	}
147 	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
148 
149 	return &entry->rdma_entry;
150 }
151 
152 #else
153 static inline bool
154 find_key_in_mmap_tbl(struct irdma_ucontext *ucontext, u64 key)
155 {
156 	struct irdma_user_mmap_entry *entry;
157 
158 	HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, key) {
159 		if (entry->pgoff_key == key)
160 			return true;
161 	}
162 
163 	return false;
164 }
165 
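/**
 * irdma_user_mmap_entry_add_hash - add an mmap entry to the ucontext hash table
 * @ucontext: user context of the caller
 * @bar_offset: offset into BAR 0 to be mapped
 * @mmap_flag: caching attribute for the mapping (NC or WC)
 * @mmap_offset: returned byte offset to pass to mmap() from user space
 *
 * A random page-offset key is generated, retrying on collision, so the
 * entry can later be looked up from vm_pgoff at mmap time.
 */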
166 struct irdma_user_mmap_entry *
167 irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
168 			       enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
169 {
170 	struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
171 	unsigned long flags;
172 	int retry_cnt = 0;
173 
174 	if (!entry)
175 		return NULL;
176 
177 	entry->bar_offset = bar_offset;
178 	entry->mmap_flag = mmap_flag;
179 	entry->ucontext = ucontext;
180 	do {
181 		get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key));
182 
183 		/* The key is a page offset */
184 		entry->pgoff_key >>= PAGE_SHIFT;
185 
186 		/* In the event of a collision in the hash table, retry a new key */
187 		spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
188 		if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) {
189 			HASH_ADD(ucontext->mmap_hash_tbl, &entry->hlist, entry->pgoff_key);
190 			spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
191 			goto hash_add_done;
192 		}
193 		spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
194 	} while (retry_cnt++ < 10);
195 
196 	irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS, "mmap table add failed: Cannot find a unique key\n");
197 	kfree(entry);
198 	return NULL;
199 
200 hash_add_done:
201 	/* libc mmap uses a byte offset */
202 	*mmap_offset = entry->pgoff_key << PAGE_SHIFT;
203 
204 	return entry;
205 }
206 
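/**
 * irdma_find_user_mmap_entry - look up an mmap entry by the vma page offset
 * @ucontext: user context of the caller
 * @vma: user VMA whose vm_pgoff holds the lookup key
 */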
207 static struct irdma_user_mmap_entry *
208 irdma_find_user_mmap_entry(struct irdma_ucontext *ucontext,
209 			   struct vm_area_struct *vma)
210 {
211 	struct irdma_user_mmap_entry *entry;
212 	unsigned long flags;
213 
214 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
215 		return NULL;
216 
217 	spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
218 	HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, vma->vm_pgoff) {
219 		if (entry->pgoff_key == vma->vm_pgoff) {
220 			spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
221 			return entry;
222 		}
223 	}
224 
225 	spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
226 
227 	return NULL;
228 }
229 
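/**
 * irdma_user_mmap_entry_del_hash - remove an mmap entry from the hash table and free it
 * @entry: entry to remove; NULL is tolerated
 */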
230 void
231 irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry)
232 {
233 	struct irdma_ucontext *ucontext;
234 	unsigned long flags;
235 
236 	if (!entry)
237 		return;
238 
239 	ucontext = entry->ucontext;
240 
241 	spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
242 	HASH_DEL(ucontext->mmap_hash_tbl, &entry->hlist);
243 	spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
244 
245 	kfree(entry);
246 }
247 
248 #endif
249 /**
250  * irdma_mmap - user memory map
251  * @context: context created during alloc
252  * @vma: kernel info for user memory map
253  */
254 static int
255 irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
256 {
257 #if __FreeBSD_version >= 1400026
258 	struct rdma_user_mmap_entry *rdma_entry;
259 #endif
260 	struct irdma_user_mmap_entry *entry;
261 	struct irdma_ucontext *ucontext;
262 	u64 pfn;
263 	int ret;
264 
265 	ucontext = to_ucontext(context);
266 
267 	/* Legacy support for libi40iw with hard-coded mmap key */
268 	if (ucontext->legacy_mode)
269 		return irdma_mmap_legacy(ucontext, vma);
270 
271 #if __FreeBSD_version >= 1400026
272 	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
273 	if (!rdma_entry) {
274 		irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
275 			    "pgoff[0x%lx] does not have valid entry\n",
276 			    vma->vm_pgoff);
277 		return -EINVAL;
278 	}
279 
280 	entry = to_irdma_mmap_entry(rdma_entry);
281 #else
282 	entry = irdma_find_user_mmap_entry(ucontext, vma);
283 	if (!entry) {
284 		irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
285 			    "pgoff[0x%lx] does not have valid entry\n",
286 			    vma->vm_pgoff);
287 		return -EINVAL;
288 	}
289 #endif
290 	irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
291 		    "bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset,
292 		    entry->mmap_flag);
293 
294 	pfn = (entry->bar_offset +
295 	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
296 
297 	switch (entry->mmap_flag) {
298 	case IRDMA_MMAP_IO_NC:
299 #if __FreeBSD_version >= 1400026
300 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
301 					pgprot_noncached(vma->vm_page_prot),
302 					rdma_entry);
303 #else
304 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
305 					pgprot_noncached(vma->vm_page_prot));
306 #endif
307 		break;
308 	case IRDMA_MMAP_IO_WC:
309 #if __FreeBSD_version >= 1400026
310 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
311 					pgprot_writecombine(vma->vm_page_prot),
312 					rdma_entry);
313 #else
314 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
315 					pgprot_writecombine(vma->vm_page_prot));
316 #endif
317 		break;
318 	default:
319 		ret = -EINVAL;
320 	}
321 
322 	if (ret)
323 		irdma_debug(iwdev_to_idev(ucontext->iwdev), IRDMA_DEBUG_VERBS,
324 			    "bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
325 			    entry->bar_offset, entry->mmap_flag, ret);
326 #if __FreeBSD_version >= 1400026
327 	rdma_user_mmap_entry_put(rdma_entry);
328 #endif
329 
330 	return ret;
331 }
332 
333 /**
334  * irdma_alloc_push_page - allocate a push page for qp
335  * @iwqp: qp pointer
336  */
337 static void
338 irdma_alloc_push_page(struct irdma_qp *iwqp)
339 {
340 	struct irdma_cqp_request *cqp_request;
341 	struct cqp_cmds_info *cqp_info;
342 	struct irdma_device *iwdev = iwqp->iwdev;
343 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
344 	int status;
345 
346 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
347 	if (!cqp_request)
348 		return;
349 
350 	cqp_info = &cqp_request->info;
351 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
352 	cqp_info->post_sq = 1;
353 	cqp_info->in.u.manage_push_page.info.push_idx = 0;
354 	cqp_info->in.u.manage_push_page.info.qs_handle =
355 	    qp->vsi->qos[qp->user_pri].qs_handle;
356 	cqp_info->in.u.manage_push_page.info.free_page = 0;
357 	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
358 	cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
359 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
360 
361 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
362 	if (!status && cqp_request->compl_info.op_ret_val <
363 	    iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
364 		qp->push_idx = cqp_request->compl_info.op_ret_val;
365 		qp->push_offset = 0;
366 	}
367 
368 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
369 }
370 
371 /**
372  * irdma_get_pbl - Retrieve pbl from a list given a virtual
373  * address
374  * @va: user virtual address
375  * @pbl_list: pbl list to search in (QP's or CQ's)
376  */
377 struct irdma_pbl *
378 irdma_get_pbl(unsigned long va,
379 	      struct list_head *pbl_list)
380 {
381 	struct irdma_pbl *iwpbl;
382 
383 	list_for_each_entry(iwpbl, pbl_list, list) {
384 		if (iwpbl->user_base == va) {
385 			list_del(&iwpbl->list);
386 			iwpbl->on_list = false;
387 			return iwpbl;
388 		}
389 	}
390 
391 	return NULL;
392 }
393 
394 /**
395  * irdma_clean_cqes - clean cq entries for qp
396  * @iwqp: qp ptr (user or kernel)
397  * @iwcq: cq ptr
398  */
399 void
400 irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
401 {
402 	struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
403 	unsigned long flags;
404 
405 	spin_lock_irqsave(&iwcq->lock, flags);
406 	irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
407 	spin_unlock_irqrestore(&iwcq->lock, flags);
408 }
409 
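/**
 * irdma_compute_push_wqe_offset - compute the BAR offset of a push WQE page
 * @iwdev: irdma device
 * @page_idx: index of the push page assigned to the QP
 */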
410 static u64
irdma_compute_push_wqe_offset(struct irdma_device *iwdev, u32 page_idx)
{
411 	u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
412 
413 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
414 		/* skip over db page */
415 		bar_off += IRDMA_HW_PAGE_SIZE;
416 		/* skip over reserved space */
417 		bar_off += IRDMA_PF_BAR_RSVD;
418 	}
419 
420 	/* push wqe page */
421 	bar_off += (u64)page_idx * IRDMA_HW_PAGE_SIZE;
422 
423 	return bar_off;
424 }
425 
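/**
 * irdma_remove_push_mmap_entries - remove the push WQE and doorbell mmap entries of a qp
 * @iwqp: qp pointer
 */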
426 void
427 irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
428 {
429 	if (iwqp->push_db_mmap_entry) {
430 #if __FreeBSD_version >= 1400026
431 		rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
432 #else
433 		irdma_user_mmap_entry_del_hash(iwqp->push_db_mmap_entry);
434 #endif
435 		iwqp->push_db_mmap_entry = NULL;
436 	}
437 	if (iwqp->push_wqe_mmap_entry) {
438 #if __FreeBSD_version >= 1400026
439 		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
440 #else
441 		irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
442 #endif
443 		iwqp->push_wqe_mmap_entry = NULL;
444 	}
445 }
446 
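/**
 * irdma_setup_push_mmap_entries - create mmap entries for the push WQE and doorbell pages
 * @ucontext: user context of the caller
 * @iwqp: qp pointer
 * @push_wqe_mmap_key: returned mmap key for the push WQE page
 * @push_db_mmap_key: returned mmap key for the push doorbell page
 */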
447 static int
448 irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
449 			      struct irdma_qp *iwqp,
450 			      u64 *push_wqe_mmap_key,
451 			      u64 *push_db_mmap_key)
452 {
453 	struct irdma_device *iwdev = ucontext->iwdev;
454 	u64 bar_off;
455 
456 	WARN_ON_ONCE(iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_2);
457 
458 	bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx);
459 
460 #if __FreeBSD_version >= 1400026
461 	iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
462 								 bar_off, IRDMA_MMAP_IO_WC,
463 								 push_wqe_mmap_key);
464 #else
465 	iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
466 								   IRDMA_MMAP_IO_WC, push_wqe_mmap_key);
467 #endif
468 	if (!iwqp->push_wqe_mmap_entry)
469 		return -ENOMEM;
470 
471 	/* push doorbell page */
472 	bar_off += IRDMA_HW_PAGE_SIZE;
473 #if __FreeBSD_version >= 1400026
474 	iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
475 								bar_off, IRDMA_MMAP_IO_NC,
476 								push_db_mmap_key);
477 #else
478 
479 	iwqp->push_db_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
480 								  IRDMA_MMAP_IO_NC, push_db_mmap_key);
481 #endif
482 	if (!iwqp->push_db_mmap_entry) {
483 #if __FreeBSD_version >= 1400026
484 		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
485 #else
486 		irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
487 #endif
488 		return -ENOMEM;
489 	}
490 
491 	return 0;
492 }
493 
494 /**
495  * irdma_setup_virt_qp - setup for allocation of virtual qp
496  * @iwdev: irdma device
497  * @iwqp: qp ptr
498  * @init_info: initialize info to return
499  */
500 void
501 irdma_setup_virt_qp(struct irdma_device *iwdev,
502 		    struct irdma_qp *iwqp,
503 		    struct irdma_qp_init_info *init_info)
504 {
505 	struct irdma_pbl *iwpbl = iwqp->iwpbl;
506 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
507 
508 	iwqp->page = qpmr->sq_page;
509 	init_info->shadow_area_pa = qpmr->shadow;
510 	if (iwpbl->pbl_allocated) {
511 		init_info->virtual_map = true;
512 		init_info->sq_pa = qpmr->sq_pbl.idx;
513 		init_info->rq_pa = qpmr->rq_pbl.idx;
514 	} else {
515 		init_info->sq_pa = qpmr->sq_pbl.addr;
516 		init_info->rq_pa = qpmr->rq_pbl.addr;
517 	}
518 }
519 
520 /**
521  * irdma_setup_umode_qp - setup sq and rq size in user mode qp
522  * @udata: user data
523  * @iwdev: iwarp device
524  * @iwqp: qp ptr (user or kernel)
525  * @info: initialize info to return
526  * @init_attr: Initial QP create attributes
527  */
528 int
529 irdma_setup_umode_qp(struct ib_udata *udata,
530 		     struct irdma_device *iwdev,
531 		     struct irdma_qp *iwqp,
532 		     struct irdma_qp_init_info *info,
533 		     struct ib_qp_init_attr *init_attr)
534 {
535 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
536 	struct irdma_create_qp_req req = {0};
537 	unsigned long flags;
538 	int ret;
539 
540 	ret = ib_copy_from_udata(&req, udata,
541 				 min(sizeof(req), udata->inlen));
542 	if (ret) {
543 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
544 			    "ib_copy_from_udata fail\n");
545 		return ret;
546 	}
547 
548 	iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
549 	iwqp->user_mode = 1;
550 	if (req.user_wqe_bufs) {
551 #if __FreeBSD_version >= 1400026
552 		struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
553 #else
554 		struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
555 #endif
556 		info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
557 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
558 		iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
559 					    &ucontext->qp_reg_mem_list);
560 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
561 
562 		if (!iwqp->iwpbl) {
563 			ret = -ENODATA;
564 			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
565 				    "no pbl info\n");
566 			return ret;
567 		}
568 	}
569 
570 	if (ukinfo->abi_ver <= 5) {
571 		/*
572 		 * ABI versions less than 6 pass raw SQ and RQ
573 		 * quanta in cap.max_send_wr and cap.max_recv_wr.
574 		 */
575 		iwqp->max_send_wr = init_attr->cap.max_send_wr;
576 		iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
577 		ukinfo->sq_size = init_attr->cap.max_send_wr;
578 		ukinfo->rq_size = init_attr->cap.max_recv_wr;
579 		irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, &ukinfo->rq_shift);
580 	} else {
581 		ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
582 						   &ukinfo->sq_shift);
583 		if (ret)
584 			return ret;
585 
586 		ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
587 						   &ukinfo->rq_shift);
588 		if (ret)
589 			return ret;
590 
591 		iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
592 		iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
593 		ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
594 		ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
595 	}
596 	irdma_setup_virt_qp(iwdev, iwqp, info);
597 
598 	return 0;
599 }
600 
601 /**
602  * irdma_setup_kmode_qp - setup initialization for kernel mode qp
603  * @iwdev: iwarp device
604  * @iwqp: qp ptr (user or kernel)
605  * @info: initialize info to return
606  * @init_attr: Initial QP create attributes
607  */
608 int
609 irdma_setup_kmode_qp(struct irdma_device *iwdev,
610 		     struct irdma_qp *iwqp,
611 		     struct irdma_qp_init_info *info,
612 		     struct ib_qp_init_attr *init_attr)
613 {
614 	struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
615 	u32 size;
616 	int status;
617 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
618 
619 	status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
620 					      &ukinfo->sq_shift);
621 	if (status)
622 		return status;
623 
624 	status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
625 					      &ukinfo->rq_shift);
626 	if (status)
627 		return status;
628 
629 	iwqp->kqp.sq_wrid_mem =
630 	    kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
631 	if (!iwqp->kqp.sq_wrid_mem)
632 		return -ENOMEM;
633 
634 	iwqp->kqp.rq_wrid_mem =
635 	    kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
636 	if (!iwqp->kqp.rq_wrid_mem) {
637 		kfree(iwqp->kqp.sq_wrid_mem);
638 		iwqp->kqp.sq_wrid_mem = NULL;
639 		return -ENOMEM;
640 	}
641 
642 	iwqp->kqp.sig_trk_mem = kcalloc(ukinfo->sq_depth, sizeof(u32), GFP_KERNEL);
644 	if (!iwqp->kqp.sig_trk_mem) {
645 		kfree(iwqp->kqp.sq_wrid_mem);
646 		iwqp->kqp.sq_wrid_mem = NULL;
647 		kfree(iwqp->kqp.rq_wrid_mem);
648 		iwqp->kqp.rq_wrid_mem = NULL;
649 		return -ENOMEM;
650 	}
651 	ukinfo->sq_sigwrtrk_array = (void *)iwqp->kqp.sig_trk_mem;
652 	ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
653 	ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
654 
655 	size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
656 	size += (IRDMA_SHADOW_AREA_SIZE << 3);
657 
658 	mem->size = size;
659 	mem->va = irdma_allocate_dma_mem(&iwdev->rf->hw, mem, mem->size,
660 					 256);
661 	if (!mem->va) {
662 		kfree(iwqp->kqp.sq_wrid_mem);
663 		iwqp->kqp.sq_wrid_mem = NULL;
664 		kfree(iwqp->kqp.rq_wrid_mem);
665 		iwqp->kqp.rq_wrid_mem = NULL;
666 		return -ENOMEM;
667 	}
668 
669 	ukinfo->sq = mem->va;
670 	info->sq_pa = mem->pa;
671 	ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
672 	info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
673 	ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
674 	info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
675 	ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
676 	ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
677 	ukinfo->qp_id = iwqp->ibqp.qp_num;
678 
679 	iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
680 	iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
681 	init_attr->cap.max_send_wr = iwqp->max_send_wr;
682 	init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
683 
684 	return 0;
685 }
686 
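/**
 * irdma_cqp_create_qp_cmd - issue a CQP QP create command for a qp
 * @iwqp: qp pointer
 */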
687 int
688 irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
689 {
690 	struct irdma_pci_f *rf = iwqp->iwdev->rf;
691 	struct irdma_cqp_request *cqp_request;
692 	struct cqp_cmds_info *cqp_info;
693 	struct irdma_create_qp_info *qp_info;
694 	int status;
695 
696 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
697 	if (!cqp_request)
698 		return -ENOMEM;
699 
700 	cqp_info = &cqp_request->info;
701 	qp_info = &cqp_request->info.in.u.qp_create.info;
702 	memset(qp_info, 0, sizeof(*qp_info));
703 	qp_info->mac_valid = true;
704 	qp_info->cq_num_valid = true;
705 	qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
706 
707 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
708 	cqp_info->post_sq = 1;
709 	cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
710 	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
711 	status = irdma_handle_cqp_op(rf, cqp_request);
712 	irdma_put_cqp_request(&rf->cqp, cqp_request);
713 
714 	return status;
715 }
716 
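/**
 * irdma_roce_fill_and_set_qpctx_info - fill RoCE/UDP offload info and program the qp context
 * @iwqp: qp pointer
 * @ctx_info: qp host context info
 */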
717 void
718 irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
719 				   struct irdma_qp_host_ctx_info *ctx_info)
720 {
721 	struct irdma_device *iwdev = iwqp->iwdev;
722 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
723 	struct irdma_roce_offload_info *roce_info;
724 	struct irdma_udp_offload_info *udp_info;
725 
726 	udp_info = &iwqp->udp_info;
727 	udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
728 	udp_info->cwnd = iwdev->roce_cwnd;
729 	udp_info->rexmit_thresh = 2;
730 	udp_info->rnr_nak_thresh = 2;
731 	udp_info->src_port = 0xc000;
732 	udp_info->dst_port = ROCE_V2_UDP_DPORT;
733 	roce_info = &iwqp->roce_info;
734 	ether_addr_copy(roce_info->mac_addr, IF_LLADDR(iwdev->netdev));
735 
736 	roce_info->rd_en = true;
737 	roce_info->wr_rdresp_en = true;
738 	roce_info->bind_en = true;
739 	roce_info->dcqcn_en = false;
740 	roce_info->rtomin = 5;
741 
742 	roce_info->ack_credits = iwdev->roce_ackcreds;
743 	roce_info->ird_size = dev->hw_attrs.max_hw_ird;
744 	roce_info->ord_size = dev->hw_attrs.max_hw_ord;
745 
746 	if (!iwqp->user_mode) {
747 		roce_info->priv_mode_en = true;
748 		roce_info->fast_reg_en = true;
749 		roce_info->udprivcq_en = true;
750 	}
751 	roce_info->roce_tver = 0;
752 
753 	ctx_info->roce_info = &iwqp->roce_info;
754 	ctx_info->udp_info = &iwqp->udp_info;
755 	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
756 }
757 
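/**
 * irdma_iw_fill_and_set_qpctx_info - fill iWARP offload info and program the qp context
 * @iwqp: qp pointer
 * @ctx_info: qp host context info
 */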
758 void
759 irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
760 				 struct irdma_qp_host_ctx_info *ctx_info)
761 {
762 	struct irdma_device *iwdev = iwqp->iwdev;
763 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
764 	struct irdma_iwarp_offload_info *iwarp_info;
765 
766 	iwarp_info = &iwqp->iwarp_info;
767 	ether_addr_copy(iwarp_info->mac_addr, IF_LLADDR(iwdev->netdev));
768 	iwarp_info->rd_en = true;
769 	iwarp_info->wr_rdresp_en = true;
770 	iwarp_info->bind_en = true;
771 	iwarp_info->ecn_en = true;
772 	iwarp_info->rtomin = 5;
773 
774 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
775 		iwarp_info->ib_rd_en = true;
776 	if (!iwqp->user_mode) {
777 		iwarp_info->priv_mode_en = true;
778 		iwarp_info->fast_reg_en = true;
779 	}
780 	iwarp_info->ddp_ver = 1;
781 	iwarp_info->rdmap_ver = 1;
782 
783 	ctx_info->iwarp_info = &iwqp->iwarp_info;
784 	ctx_info->iwarp_info_valid = true;
785 	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
786 	ctx_info->iwarp_info_valid = false;
787 }
788 
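/**
 * irdma_validate_qp_attrs - check qp create attributes against device limits
 * @init_attr: QP create attributes from the caller
 * @iwdev: irdma device
 */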
789 int
790 irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
791 			struct irdma_device *iwdev)
792 {
793 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
794 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
795 
796 	if (init_attr->create_flags)
797 		return -EOPNOTSUPP;
798 
799 	if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
800 	    init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
801 	    init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
802 		return -EINVAL;
803 
804 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
805 		if (init_attr->qp_type != IB_QPT_RC &&
806 		    init_attr->qp_type != IB_QPT_UD &&
807 		    init_attr->qp_type != IB_QPT_GSI)
808 			return -EOPNOTSUPP;
809 	} else {
810 		if (init_attr->qp_type != IB_QPT_RC)
811 			return -EOPNOTSUPP;
812 	}
813 
814 	return 0;
815 }
816 
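/**
 * irdma_sched_qp_flush_work - schedule the delayed flush work for a qp
 * @iwqp: qp pointer
 *
 * A qp reference is taken for the scheduled work and released in
 * irdma_flush_worker(), or released immediately if the work was
 * already queued.
 */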
817 void
818 irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
819 {
820 	irdma_qp_add_ref(&iwqp->ibqp);
821 	if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
822 			     msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)))
823 		irdma_qp_rem_ref(&iwqp->ibqp);
824 }
825 
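/**
 * irdma_flush_worker - delayed work to generate flush completions for a qp
 * @work: work_struct embedded in the qp's dwork_flush
 */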
826 void
827 irdma_flush_worker(struct work_struct *work)
828 {
829 	struct delayed_work *dwork = to_delayed_work(work);
830 	struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
831 
832 	irdma_generate_flush_completions(iwqp);
833 	/* For the add in irdma_sched_qp_flush_work */
834 	irdma_qp_rem_ref(&iwqp->ibqp);
835 }
836 
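/**
 * irdma_get_ib_acc_flags - derive IB access flags from the qp offload info
 * @iwqp: qp pointer
 */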
837 static int
838 irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
839 {
840 	int acc_flags = 0;
841 
842 	if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
843 		if (iwqp->roce_info.wr_rdresp_en) {
844 			acc_flags |= IB_ACCESS_LOCAL_WRITE;
845 			acc_flags |= IB_ACCESS_REMOTE_WRITE;
846 		}
847 		if (iwqp->roce_info.rd_en)
848 			acc_flags |= IB_ACCESS_REMOTE_READ;
849 		if (iwqp->roce_info.bind_en)
850 			acc_flags |= IB_ACCESS_MW_BIND;
851 	} else {
852 		if (iwqp->iwarp_info.wr_rdresp_en) {
853 			acc_flags |= IB_ACCESS_LOCAL_WRITE;
854 			acc_flags |= IB_ACCESS_REMOTE_WRITE;
855 		}
856 		if (iwqp->iwarp_info.rd_en)
857 			acc_flags |= IB_ACCESS_REMOTE_READ;
858 		if (iwqp->iwarp_info.bind_en)
859 			acc_flags |= IB_ACCESS_MW_BIND;
860 	}
861 	return acc_flags;
862 }
863 
864 /**
865  * irdma_query_qp - query qp attributes
866  * @ibqp: qp pointer
867  * @attr: attributes pointer
868  * @attr_mask: Not used
869  * @init_attr: qp attributes to return
870  */
871 static int
872 irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
873 	       int attr_mask, struct ib_qp_init_attr *init_attr)
874 {
875 	struct irdma_qp *iwqp = to_iwqp(ibqp);
876 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
877 
878 	memset(attr, 0, sizeof(*attr));
879 	memset(init_attr, 0, sizeof(*init_attr));
880 
881 	attr->qp_state = iwqp->ibqp_state;
882 	attr->cur_qp_state = iwqp->ibqp_state;
883 	attr->cap.max_send_wr = iwqp->max_send_wr;
884 	attr->cap.max_recv_wr = iwqp->max_recv_wr;
885 	attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
886 	attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
887 	attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
888 	attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
889 	attr->port_num = 1;
890 	if (rdma_protocol_roce(ibqp->device, 1)) {
891 		attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
892 		attr->qkey = iwqp->roce_info.qkey;
893 		attr->rq_psn = iwqp->udp_info.epsn;
894 		attr->sq_psn = iwqp->udp_info.psn_nxt;
895 		attr->dest_qp_num = iwqp->roce_info.dest_qp;
896 		attr->pkey_index = iwqp->roce_info.p_key;
897 		attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
898 		attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
899 		attr->max_rd_atomic = iwqp->roce_info.ord_size;
900 		attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
901 	}
902 
903 	init_attr->event_handler = iwqp->ibqp.event_handler;
904 	init_attr->qp_context = iwqp->ibqp.qp_context;
905 	init_attr->send_cq = iwqp->ibqp.send_cq;
906 	init_attr->recv_cq = iwqp->ibqp.recv_cq;
907 	init_attr->cap = attr->cap;
908 
909 	return 0;
910 }
911 
912 /**
913  * irdma_modify_qp_roce - modify qp request
914  * @ibqp: qp's pointer for modify
915  * @attr: access attributes
916  * @attr_mask: state mask
917  * @udata: user data
918  */
919 int
920 irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
921 		     int attr_mask, struct ib_udata *udata)
922 {
923 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
924 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
925 	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
926 	struct irdma_qp *iwqp = to_iwqp(ibqp);
927 	struct irdma_device *iwdev = iwqp->iwdev;
928 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
929 	struct irdma_qp_host_ctx_info *ctx_info;
930 	struct irdma_roce_offload_info *roce_info;
931 	struct irdma_udp_offload_info *udp_info;
932 	struct irdma_modify_qp_info info = {0};
933 	struct irdma_modify_qp_resp uresp = {};
934 	struct irdma_modify_qp_req ureq;
935 	unsigned long flags;
936 	u8 issue_modify_qp = 0;
937 	int ret = 0;
938 
939 	ctx_info = &iwqp->ctx_info;
940 	roce_info = &iwqp->roce_info;
941 	udp_info = &iwqp->udp_info;
942 
943 	if (udata) {
944 		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
945 		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
946 			return -EINVAL;
947 	}
948 
949 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
950 		return -EOPNOTSUPP;
951 
952 	if (attr_mask & IB_QP_DEST_QPN)
953 		roce_info->dest_qp = attr->dest_qp_num;
954 
955 	if (attr_mask & IB_QP_PKEY_INDEX) {
956 		ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
957 				       &roce_info->p_key);
958 		if (ret)
959 			return ret;
960 	}
961 
962 	if (attr_mask & IB_QP_QKEY)
963 		roce_info->qkey = attr->qkey;
964 
965 	if (attr_mask & IB_QP_PATH_MTU)
966 		udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
967 
968 	if (attr_mask & IB_QP_SQ_PSN) {
969 		udp_info->psn_nxt = attr->sq_psn;
970 		udp_info->lsn = 0xffff;
971 		udp_info->psn_una = attr->sq_psn;
972 		udp_info->psn_max = attr->sq_psn;
973 	}
974 
975 	if (attr_mask & IB_QP_RQ_PSN)
976 		udp_info->epsn = attr->rq_psn;
977 
978 	if (attr_mask & IB_QP_RNR_RETRY)
979 		udp_info->rnr_nak_thresh = attr->rnr_retry;
980 
981 	if (attr_mask & IB_QP_RETRY_CNT)
982 		udp_info->rexmit_thresh = attr->retry_cnt;
983 
984 	ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
985 
986 	if (attr_mask & IB_QP_AV) {
987 		struct irdma_av *av = &iwqp->roce_ah.av;
988 		u16 vlan_id = VLAN_N_VID;
989 		u32 local_ip[4] = {};
990 
991 		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
992 		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
993 			udp_info->ttl = attr->ah_attr.grh.hop_limit;
994 			udp_info->flow_label = attr->ah_attr.grh.flow_label;
995 			udp_info->tos = attr->ah_attr.grh.traffic_class;
996 
997 			udp_info->src_port = kc_rdma_get_udp_sport(udp_info->flow_label,
998 								   ibqp->qp_num,
999 								   roce_info->dest_qp);
1000 
1001 			irdma_qp_rem_qos(&iwqp->sc_qp);
1002 			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1003 			if (iwqp->sc_qp.vsi->dscp_mode)
1004 				ctx_info->user_pri =
1005 				    iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
1006 			else
1007 				ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1008 		}
1009 		ret = kc_irdma_set_roce_cm_info(iwqp, attr, &vlan_id);
1010 		if (ret)
1011 			return ret;
1012 		if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1013 			return -ENOMEM;
1014 		iwqp->sc_qp.user_pri = ctx_info->user_pri;
1015 		irdma_qp_add_qos(&iwqp->sc_qp);
1016 
1017 		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1018 			vlan_id = 0;
1019 		if (vlan_id < VLAN_N_VID) {
1020 			udp_info->insert_vlan_tag = true;
1021 			udp_info->vlan_tag = vlan_id |
1022 			    ctx_info->user_pri << VLAN_PRIO_SHIFT;
1023 		} else {
1024 			udp_info->insert_vlan_tag = false;
1025 		}
1026 
1027 		av->attrs = attr->ah_attr;
1028 		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1029 		if (av->sgid_addr.saddr.sa_family == AF_INET6) {
1030 			__be32 *daddr =
1031 			av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
1032 			__be32 *saddr =
1033 			av->sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
1034 
1035 			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1036 			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1037 
1038 			udp_info->ipv4 = false;
1039 			irdma_copy_ip_ntohl(local_ip, daddr);
1040 
1041 			udp_info->arp_idx = irdma_arp_table(iwdev->rf, local_ip,
1042 							    NULL, IRDMA_ARP_RESOLVE);
1043 		} else {
1044 			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1045 			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1046 
1047 			local_ip[0] = ntohl(daddr);
1048 
1049 			udp_info->ipv4 = true;
1050 			udp_info->dest_ip_addr[0] = 0;
1051 			udp_info->dest_ip_addr[1] = 0;
1052 			udp_info->dest_ip_addr[2] = 0;
1053 			udp_info->dest_ip_addr[3] = local_ip[0];
1054 
1055 			udp_info->local_ipaddr[0] = 0;
1056 			udp_info->local_ipaddr[1] = 0;
1057 			udp_info->local_ipaddr[2] = 0;
1058 			udp_info->local_ipaddr[3] = ntohl(saddr);
1059 		}
1060 		udp_info->arp_idx =
1061 		    irdma_add_arp(iwdev->rf, local_ip,
1062 				  ah_attr_to_dmac(attr->ah_attr));
1063 	}
1064 
1065 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1066 		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1067 			ibdev_err(&iwdev->ibdev,
1068 				  "rd_atomic = %d, above max_hw_ord=%d\n",
1069 				  attr->max_rd_atomic,
1070 				  dev->hw_attrs.max_hw_ord);
1071 			return -EINVAL;
1072 		}
1073 		if (attr->max_rd_atomic)
1074 			roce_info->ord_size = attr->max_rd_atomic;
1075 		info.ord_valid = true;
1076 	}
1077 
1078 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1079 		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1080 			ibdev_err(&iwdev->ibdev,
1081 				  "dest_rd_atomic = %d, above max_hw_ird=%d\n",
1082 				  attr->max_dest_rd_atomic,
1083 				  dev->hw_attrs.max_hw_ird);
1084 			return -EINVAL;
1085 		}
1086 		if (attr->max_dest_rd_atomic)
1087 			roce_info->ird_size = attr->max_dest_rd_atomic;
1088 	}
1089 
1090 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1091 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1092 			roce_info->wr_rdresp_en = true;
1093 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1094 			roce_info->wr_rdresp_en = true;
1095 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1096 			roce_info->rd_en = true;
1097 	}
1098 
1099 	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1100 
1101 	irdma_debug(dev, IRDMA_DEBUG_VERBS,
1102 		    "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1103 		    __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state,
1104 		    iwqp->iwarp_state, attr_mask);
1105 
1106 	spin_lock_irqsave(&iwqp->lock, flags);
1107 	if (attr_mask & IB_QP_STATE) {
1108 		if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1109 					   iwqp->ibqp.qp_type, attr_mask,
1110 					   IB_LINK_LAYER_ETHERNET)) {
1111 			irdma_print("modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1112 				    iwqp->ibqp.qp_num, iwqp->ibqp_state,
1113 				    attr->qp_state);
1114 			ret = -EINVAL;
1115 			goto exit;
1116 		}
1117 		info.curr_iwarp_state = iwqp->iwarp_state;
1118 
1119 		switch (attr->qp_state) {
1120 		case IB_QPS_INIT:
1121 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1122 				ret = -EINVAL;
1123 				goto exit;
1124 			}
1125 
1126 			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1127 				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1128 				issue_modify_qp = 1;
1129 			}
1130 			break;
1131 		case IB_QPS_RTR:
1132 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1133 				ret = -EINVAL;
1134 				goto exit;
1135 			}
1136 			info.arp_cache_idx_valid = true;
1137 			info.cq_num_valid = true;
1138 			info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1139 			issue_modify_qp = 1;
1140 			break;
1141 		case IB_QPS_RTS:
1142 			if (iwqp->ibqp_state < IB_QPS_RTR ||
1143 			    iwqp->ibqp_state == IB_QPS_ERR) {
1144 				ret = -EINVAL;
1145 				goto exit;
1146 			}
1147 
1148 			info.arp_cache_idx_valid = true;
1149 			info.cq_num_valid = true;
1150 			info.ord_valid = true;
1151 			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1152 			issue_modify_qp = 1;
1153 			if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
1154 				iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
1155 			udp_info->cwnd = iwdev->roce_cwnd;
1156 			roce_info->ack_credits = iwdev->roce_ackcreds;
1157 			if (iwdev->push_mode && udata &&
1158 			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1159 			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1160 				spin_unlock_irqrestore(&iwqp->lock, flags);
1161 				irdma_alloc_push_page(iwqp);
1162 				spin_lock_irqsave(&iwqp->lock, flags);
1163 			}
1164 			break;
1165 		case IB_QPS_SQD:
1166 			if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1167 				goto exit;
1168 
1169 			if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1170 				ret = -EINVAL;
1171 				goto exit;
1172 			}
1173 
1174 			info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1175 			issue_modify_qp = 1;
1176 			break;
1177 		case IB_QPS_SQE:
1178 		case IB_QPS_ERR:
1179 		case IB_QPS_RESET:
1180 			if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1181 				spin_unlock_irqrestore(&iwqp->lock, flags);
1182 				info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1183 				irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1184 				spin_lock_irqsave(&iwqp->lock, flags);
1185 			}
1186 
1187 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1188 				spin_unlock_irqrestore(&iwqp->lock, flags);
1189 				if (udata && udata->inlen) {
1190 					if (ib_copy_from_udata(&ureq, udata,
1191 							       min(sizeof(ureq), udata->inlen)))
1192 						return -EINVAL;
1193 
1194 					irdma_flush_wqes(iwqp,
1195 							 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1196 							 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1197 							 IRDMA_REFLUSH);
1198 				}
1199 				return 0;
1200 			}
1201 
1202 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1203 			issue_modify_qp = 1;
1204 			break;
1205 		default:
1206 			ret = -EINVAL;
1207 			goto exit;
1208 		}
1209 
1210 		iwqp->ibqp_state = attr->qp_state;
1211 	}
1212 
1213 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1214 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1215 	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1216 	spin_unlock_irqrestore(&iwqp->lock, flags);
1217 
1218 	if (attr_mask & IB_QP_STATE) {
1219 		if (issue_modify_qp) {
1220 			ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1221 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1222 				return -EINVAL;
1223 			spin_lock_irqsave(&iwqp->lock, flags);
1224 			if (iwqp->iwarp_state == info.curr_iwarp_state) {
1225 				iwqp->iwarp_state = info.next_iwarp_state;
1226 				iwqp->ibqp_state = attr->qp_state;
1227 			}
1228 			if (iwqp->ibqp_state > IB_QPS_RTS &&
1229 			    !iwqp->flush_issued) {
1230 				spin_unlock_irqrestore(&iwqp->lock, flags);
1231 				irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1232 						 IRDMA_FLUSH_RQ |
1233 						 IRDMA_FLUSH_WAIT);
1234 				iwqp->flush_issued = 1;
1235 
1236 			} else {
1237 				spin_unlock_irqrestore(&iwqp->lock, flags);
1238 			}
1239 		} else {
1240 			iwqp->ibqp_state = attr->qp_state;
1241 		}
1242 		if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1243 			struct irdma_ucontext *ucontext;
1244 
1245 #if __FreeBSD_version >= 1400026
1246 			ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1247 #else
1248 			ucontext = to_ucontext(ibqp->uobject->context);
1249 #endif
1250 			if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1251 			    !iwqp->push_wqe_mmap_entry &&
1252 			    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1253 							   &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1254 				uresp.push_valid = 1;
1255 				uresp.push_offset = iwqp->sc_qp.push_offset;
1256 			}
1257 			uresp.rd_fence_rate = iwdev->rd_fence_rate;
1258 			ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1259 								  udata->outlen));
1260 			if (ret) {
1261 				irdma_remove_push_mmap_entries(iwqp);
1262 				irdma_debug(iwdev_to_idev(iwdev),
1263 					    IRDMA_DEBUG_VERBS,
1264 					    "copy_to_udata failed\n");
1265 				return ret;
1266 			}
1267 		}
1268 	}
1269 
1270 	return 0;
1271 exit:
1272 	spin_unlock_irqrestore(&iwqp->lock, flags);
1273 
1274 	return ret;
1275 }
1276 
1277 /**
1278  * irdma_modify_qp - modify qp request
1279  * @ibqp: qp's pointer for modify
1280  * @attr: access attributes
1281  * @attr_mask: state mask
1282  * @udata: user data
1283  */
1284 int
1285 irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1286 		struct ib_udata *udata)
1287 {
1288 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1289 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1290 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1291 	struct irdma_device *iwdev = iwqp->iwdev;
1292 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1293 	struct irdma_qp_host_ctx_info *ctx_info;
1294 	struct irdma_tcp_offload_info *tcp_info;
1295 	struct irdma_iwarp_offload_info *offload_info;
1296 	struct irdma_modify_qp_info info = {0};
1297 	struct irdma_modify_qp_resp uresp = {};
1298 	struct irdma_modify_qp_req ureq = {};
1299 	u8 issue_modify_qp = 0;
1300 	u8 dont_wait = 0;
1301 	int err;
1302 	unsigned long flags;
1303 
1304 	if (udata) {
1305 		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1306 		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1307 			return -EINVAL;
1308 	}
1309 
1310 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1311 		return -EOPNOTSUPP;
1312 
1313 	ctx_info = &iwqp->ctx_info;
1314 	offload_info = &iwqp->iwarp_info;
1315 	tcp_info = &iwqp->tcp_info;
1316 	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1317 	irdma_debug(dev, IRDMA_DEBUG_VERBS,
1318 		    "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1319 		    __builtin_return_address(0), ibqp->qp_num, attr->qp_state, iwqp->ibqp_state, iwqp->iwarp_state,
1320 		    iwqp->last_aeq, iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1321 
1322 	spin_lock_irqsave(&iwqp->lock, flags);
1323 	if (attr_mask & IB_QP_STATE) {
1324 		info.curr_iwarp_state = iwqp->iwarp_state;
1325 		switch (attr->qp_state) {
1326 		case IB_QPS_INIT:
1327 		case IB_QPS_RTR:
1328 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1329 				err = -EINVAL;
1330 				goto exit;
1331 			}
1332 
1333 			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1334 				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1335 				issue_modify_qp = 1;
1336 			}
1337 			if (iwdev->push_mode && udata &&
1338 			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1339 			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1340 				spin_unlock_irqrestore(&iwqp->lock, flags);
1341 				irdma_alloc_push_page(iwqp);
1342 				spin_lock_irqsave(&iwqp->lock, flags);
1343 			}
1344 			break;
1345 		case IB_QPS_RTS:
1346 			if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1347 			    !iwqp->cm_id) {
1348 				err = -EINVAL;
1349 				goto exit;
1350 			}
1351 
1352 			issue_modify_qp = 1;
1353 			iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1354 			iwqp->hte_added = 1;
1355 			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1356 			info.tcp_ctx_valid = true;
1357 			info.ord_valid = true;
1358 			info.arp_cache_idx_valid = true;
1359 			info.cq_num_valid = true;
1360 			break;
1361 		case IB_QPS_SQD:
1362 			if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1363 				err = 0;
1364 				goto exit;
1365 			}
1366 
1367 			if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1368 			    iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1369 				err = 0;
1370 				goto exit;
1371 			}
1372 
1373 			if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1374 				err = -EINVAL;
1375 				goto exit;
1376 			}
1377 
1378 			info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1379 			issue_modify_qp = 1;
1380 			break;
1381 		case IB_QPS_SQE:
1382 			if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1383 				err = -EINVAL;
1384 				goto exit;
1385 			}
1386 
1387 			info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1388 			issue_modify_qp = 1;
1389 			break;
1390 		case IB_QPS_ERR:
1391 		case IB_QPS_RESET:
1392 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1393 				spin_unlock_irqrestore(&iwqp->lock, flags);
1394 				if (udata && udata->inlen) {
1395 					if (ib_copy_from_udata(&ureq, udata,
1396 							       min(sizeof(ureq), udata->inlen)))
1397 						return -EINVAL;
1398 
1399 					irdma_flush_wqes(iwqp,
1400 							 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1401 							 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1402 							 IRDMA_REFLUSH);
1403 				}
1404 				return 0;
1405 			}
1406 
1407 			if (iwqp->sc_qp.term_flags) {
1408 				spin_unlock_irqrestore(&iwqp->lock, flags);
1409 				irdma_terminate_del_timer(&iwqp->sc_qp);
1410 				spin_lock_irqsave(&iwqp->lock, flags);
1411 			}
1412 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1413 			if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1414 			    iwdev->iw_status &&
1415 			    iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1416 				info.reset_tcp_conn = true;
1417 			else
1418 				dont_wait = 1;
1419 
1420 			issue_modify_qp = 1;
1421 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1422 			break;
1423 		default:
1424 			err = -EINVAL;
1425 			goto exit;
1426 		}
1427 
1428 		iwqp->ibqp_state = attr->qp_state;
1429 	}
1430 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1431 		ctx_info->iwarp_info_valid = true;
1432 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1433 			offload_info->wr_rdresp_en = true;
1434 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1435 			offload_info->wr_rdresp_en = true;
1436 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1437 			offload_info->rd_en = true;
1438 	}
1439 
1440 	if (ctx_info->iwarp_info_valid) {
1441 		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1442 		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1443 		irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1444 	}
1445 	spin_unlock_irqrestore(&iwqp->lock, flags);
1446 
1447 	if (attr_mask & IB_QP_STATE) {
1448 		if (issue_modify_qp) {
1449 			ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1450 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1451 				return -EINVAL;
1452 		}
1453 
1454 		spin_lock_irqsave(&iwqp->lock, flags);
1455 		if (iwqp->iwarp_state == info.curr_iwarp_state) {
1456 			iwqp->iwarp_state = info.next_iwarp_state;
1457 			iwqp->ibqp_state = attr->qp_state;
1458 		}
1459 		spin_unlock_irqrestore(&iwqp->lock, flags);
1460 	}
1461 
1462 	if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1463 		if (dont_wait) {
1464 			if (iwqp->hw_tcp_state) {
1465 				spin_lock_irqsave(&iwqp->lock, flags);
1466 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1467 				iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1468 				spin_unlock_irqrestore(&iwqp->lock, flags);
1469 			}
1470 			irdma_cm_disconn(iwqp);
1471 		} else {
1472 			int close_timer_started;
1473 
1474 			spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1475 
1476 			if (iwqp->cm_node) {
1477 				atomic_inc(&iwqp->cm_node->refcnt);
1478 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1479 				close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1480 				if (iwqp->cm_id && close_timer_started == 1)
1481 					irdma_schedule_cm_timer(iwqp->cm_node,
1482 								(struct irdma_puda_buf *)iwqp,
1483 								IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1484 
1485 				irdma_rem_ref_cm_node(iwqp->cm_node);
1486 			} else {
1487 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1488 			}
1489 		}
1490 	}
1491 	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1492 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1493 		struct irdma_ucontext *ucontext;
1494 
1495 #if __FreeBSD_version >= 1400026
1496 		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1497 #else
1498 		ucontext = to_ucontext(ibqp->uobject->context);
1499 #endif
1500 		if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1501 		    !iwqp->push_wqe_mmap_entry &&
1502 		    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1503 						   &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1504 			uresp.push_valid = 1;
1505 			uresp.push_offset = iwqp->sc_qp.push_offset;
1506 		}
1507 		uresp.rd_fence_rate = iwdev->rd_fence_rate;
1508 
1509 		err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1510 							  udata->outlen));
1511 		if (err) {
1512 			irdma_remove_push_mmap_entries(iwqp);
1513 			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1514 				    "copy_to_udata failed\n");
1515 			return err;
1516 		}
1517 	}
1518 
1519 	return 0;
1520 exit:
1521 	spin_unlock_irqrestore(&iwqp->lock, flags);
1522 
1523 	return err;
1524 }
1525 
1526 /**
1527  * irdma_cq_free_rsrc - free up resources for cq
1528  * @rf: RDMA PCI function
1529  * @iwcq: cq ptr
1530  */
1531 void
1532 irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1533 {
1534 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1535 
1536 	if (!iwcq->user_mode) {
1537 		irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem);
1538 		irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem_shadow);
1539 	}
1540 
1541 	irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1542 }
1543 
1544 /**
1545  * irdma_free_cqbuf - worker to free a cq buffer
1546  * @work: provides access to the cq buffer to free
1547  */
1548 static void
1549 irdma_free_cqbuf(struct work_struct *work)
1550 {
1551 	struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1552 
1553 	irdma_free_dma_mem(cq_buf->hw, &cq_buf->kmem_buf);
1554 	kfree(cq_buf);
1555 }
1556 
1557 /**
1558  * irdma_process_resize_list - remove resized cq buffers from the resize_list
1559  * @iwcq: cq which owns the resize_list
1560  * @iwdev: irdma device
1561  * @lcqe_buf: the buffer where the last cqe is received
1562  */
1563 int
1564 irdma_process_resize_list(struct irdma_cq *iwcq,
1565 			  struct irdma_device *iwdev,
1566 			  struct irdma_cq_buf *lcqe_buf)
1567 {
1568 	struct list_head *tmp_node, *list_node;
1569 	struct irdma_cq_buf *cq_buf;
1570 	int cnt = 0;
1571 
1572 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1573 		cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1574 		if (cq_buf == lcqe_buf)
1575 			return cnt;
1576 
1577 		list_del(&cq_buf->list);
1578 		queue_work(iwdev->cleanup_wq, &cq_buf->work);
1579 		cnt++;
1580 	}
1581 
1582 	return cnt;
1583 }
1584 
1585 /**
1586  * irdma_resize_cq - resize cq
1587  * @ibcq: cq to be resized
1588  * @entries: desired cq size
1589  * @udata: user data
1590  */
1591 static int
1592 irdma_resize_cq(struct ib_cq *ibcq, int entries,
1593 		struct ib_udata *udata)
1594 {
1595 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
1596 	struct irdma_cq *iwcq = to_iwcq(ibcq);
1597 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1598 	struct irdma_cqp_request *cqp_request;
1599 	struct cqp_cmds_info *cqp_info;
1600 	struct irdma_modify_cq_info *m_info;
1601 	struct irdma_modify_cq_info info = {0};
1602 	struct irdma_dma_mem kmem_buf;
1603 	struct irdma_cq_mr *cqmr_buf;
1604 	struct irdma_pbl *iwpbl_buf;
1605 	struct irdma_device *iwdev;
1606 	struct irdma_pci_f *rf;
1607 	struct irdma_cq_buf *cq_buf = NULL;
1608 	unsigned long flags;
1609 	int ret;
1610 
1611 	iwdev = to_iwdev(ibcq->device);
1612 	rf = iwdev->rf;
1613 
1614 	if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1615 	      IRDMA_FEATURE_CQ_RESIZE))
1616 		return -EOPNOTSUPP;
1617 
1618 	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
1619 		return -EINVAL;
1620 
1621 	if (entries > rf->max_cqe)
1622 		return -EINVAL;
1623 
1624 	if (!iwcq->user_mode) {
1625 		entries++;
1626 		if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1627 			entries *= 2;
1628 	}
1629 
1630 	info.cq_size = max(entries, 4);
1631 
1632 	if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1633 		return 0;
1634 
1635 	if (udata) {
1636 		struct irdma_resize_cq_req req = {};
1637 		struct irdma_ucontext *ucontext =
1638 #if __FreeBSD_version >= 1400026
1639 		rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
1640 #else
1641 		to_ucontext(ibcq->uobject->context);
1642 #endif
1643 
1644 		/* CQ resize not supported with legacy GEN_1 libi40iw */
1645 		if (ucontext->legacy_mode)
1646 			return -EOPNOTSUPP;
1647 
1648 		if (ib_copy_from_udata(&req, udata,
1649 				       min(sizeof(req), udata->inlen)))
1650 			return -EINVAL;
1651 
1652 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1653 		iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1654 					  &ucontext->cq_reg_mem_list);
1655 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1656 
1657 		if (!iwpbl_buf)
1658 			return -ENOMEM;
1659 
1660 		cqmr_buf = &iwpbl_buf->cq_mr;
1661 		if (iwpbl_buf->pbl_allocated) {
1662 			info.virtual_map = true;
1663 			info.pbl_chunk_size = 1;
1664 			info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1665 		} else {
1666 			info.cq_pa = cqmr_buf->cq_pbl.addr;
1667 		}
1668 	} else {
1669 		/* Kmode CQ resize */
1670 		int rsize;
1671 
1672 		rsize = info.cq_size * sizeof(struct irdma_cqe);
1673 		kmem_buf.size = round_up(rsize, 256);
1674 		kmem_buf.va = irdma_allocate_dma_mem(dev->hw, &kmem_buf,
1675 						     kmem_buf.size, 256);
1676 		if (!kmem_buf.va)
1677 			return -ENOMEM;
1678 
1679 		info.cq_base = kmem_buf.va;
1680 		info.cq_pa = kmem_buf.pa;
1681 		cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1682 		if (!cq_buf) {
1683 			ret = -ENOMEM;
1684 			goto error;
1685 		}
1686 	}
1687 
1688 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1689 	if (!cqp_request) {
1690 		ret = -ENOMEM;
1691 		goto error;
1692 	}
1693 
1694 	info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1695 	info.cq_resize = true;
1696 
1697 	cqp_info = &cqp_request->info;
1698 	m_info = &cqp_info->in.u.cq_modify.info;
1699 	memcpy(m_info, &info, sizeof(*m_info));
1700 
1701 	cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1702 	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1703 	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1704 	cqp_info->post_sq = 1;
1705 	ret = irdma_handle_cqp_op(rf, cqp_request);
1706 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1707 	if (ret)
1708 		goto error;
1709 
1710 	spin_lock_irqsave(&iwcq->lock, flags);
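	/*
	 * Save the old kernel-mode CQ buffer on the resize list so any CQEs
	 * still in it can be polled before the buffer is freed.
	 */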
1711 	if (cq_buf) {
1712 		cq_buf->kmem_buf = iwcq->kmem;
1713 		cq_buf->hw = dev->hw;
1714 		memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1715 		INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1716 		list_add_tail(&cq_buf->list, &iwcq->resize_list);
1717 		iwcq->kmem = kmem_buf;
1718 	}
1719 
1720 	irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1721 	ibcq->cqe = info.cq_size - 1;
1722 	spin_unlock_irqrestore(&iwcq->lock, flags);
1723 
1724 	return 0;
1725 error:
1726 	if (!udata)
1727 		irdma_free_dma_mem(dev->hw, &kmem_buf);
1728 	kfree(cq_buf);
1729 
1730 	return ret;
1731 }
1732 
1733 /**
1734  * irdma_get_mr_access - get hw MR access permissions from IB access flags
1735  * @access: IB access flags
1736  */
1737 static inline u16
irdma_get_mr_access(int access)
{
1738 	u16 hw_access = 0;
1739 
1740 	hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
1741 	    IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
1742 	hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
1743 	    IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
1744 	hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
1745 	    IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
1746 	hw_access |= (access & IB_ACCESS_MW_BIND) ?
1747 	    IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
1748 	hw_access |= (access & IB_ZERO_BASED) ?
1749 	    IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
1750 	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
1751 
1752 	return hw_access;
1753 }
1754 
1755 /**
1756  * irdma_free_stag - free stag resource
1757  * @iwdev: irdma device
1758  * @stag: stag to free
1759  */
1760 void
1761 irdma_free_stag(struct irdma_device *iwdev, u32 stag)
1762 {
1763 	u32 stag_idx;
1764 
1765 	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
1766 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
1767 }
1768 
1769 /**
1770  * irdma_create_stag - create random stag
1771  * @iwdev: irdma device
1772  */
1773 u32
1774 irdma_create_stag(struct irdma_device *iwdev)
1775 {
1776 	u32 stag;
1777 	u32 stag_index = 0;
1778 	u32 next_stag_index;
1779 	u32 driver_key;
1780 	u32 random;
1781 	u8 consumer_key;
1782 	int ret;
1783 
1784 	get_random_bytes(&random, sizeof(random));
1785 	consumer_key = (u8)random;
1786 
1787 	driver_key = random & ~iwdev->rf->mr_stagmask;
1788 	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
1789 	next_stag_index %= iwdev->rf->max_mr;
1790 
1791 	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
1792 			       iwdev->rf->max_mr, &stag_index,
1793 			       &next_stag_index);
1794 	if (ret)
1795 		return 0;
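	/* Build the stag from the allocated index, driver key and random consumer key */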
1796 	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
1797 	stag |= driver_key;
1798 	stag += (u32)consumer_key;
1799 
1800 	return stag;
1801 }
1802 
1803 /**
1804  * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
1805  * @arr: lvl1 pbl array
1806  * @npages: page count
1807  * @pg_size: page size
1808  *
1809  */
1810 static bool
1811 irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1812 {
1813 	u32 pg_idx;
1814 
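	/* Contiguous only if every entry equals the first address plus pg_idx * pg_size */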
1815 	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1816 		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
1817 			return false;
1818 	}
1819 
1820 	return true;
1821 }
1822 
1823 /**
1824  * irdma_check_mr_contiguous - check if MR is physically contiguous
1825  * @palloc: pbl allocation struct
1826  * @pg_size: page size
1827  */
1828 static bool
1829 irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
1830 			  u32 pg_size)
1831 {
1832 	struct irdma_pble_level2 *lvl2 = &palloc->level2;
1833 	struct irdma_pble_info *leaf = lvl2->leaf;
1834 	u64 *arr = NULL;
1835 	u64 *start_addr = NULL;
1836 	int i;
1837 	bool ret;
1838 
1839 	if (palloc->level == PBLE_LEVEL_1) {
1840 		arr = palloc->level1.addr;
1841 		ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
1842 						 pg_size);
1843 		return ret;
1844 	}
1845 
1846 	start_addr = leaf->addr;
1847 
1848 	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1849 		arr = leaf->addr;
1850 		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1851 			return false;
1852 		ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
1853 		if (!ret)
1854 			return false;
1855 	}
1856 
1857 	return true;
1858 }
1859 
1860 /**
1861  * irdma_setup_pbles - copy user page addresses to pbles
1862  * @rf: RDMA PCI function
1863  * @iwmr: mr pointer for this memory registration
1864  * @use_pbles: flag to use pbles
1865  * @lvl_1_only: request only level 1 pble if true
1866  */
1867 static int
1868 irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
1869 		  bool use_pbles, bool lvl_1_only)
1870 {
1871 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
1872 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1873 	struct irdma_pble_info *pinfo;
1874 	u64 *pbl;
1875 	int status;
1876 	enum irdma_pble_level level = PBLE_LEVEL_1;
1877 
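	/*
	 * use_pbles selects hardware pbles from the pble pool; otherwise the
	 * page addresses fit in the inline pgaddrmem array.
	 */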
1878 	if (use_pbles) {
1879 		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
1880 					lvl_1_only);
1881 		if (status)
1882 			return status;
1883 
1884 		iwpbl->pbl_allocated = true;
1885 		level = palloc->level;
1886 		pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
1887 		    palloc->level2.leaf;
1888 		pbl = pinfo->addr;
1889 	} else {
1890 		pbl = iwmr->pgaddrmem;
1891 	}
1892 
1893 	irdma_copy_user_pgaddrs(iwmr, pbl, level);
1894 
1895 	if (use_pbles)
1896 		iwmr->pgaddrmem[0] = *pbl;
1897 
1898 	return 0;
1899 }
1900 
1901 /**
1902  * irdma_handle_q_mem - handle memory for qp and cq
1903  * @iwdev: irdma device
1904  * @req: information for q memory management
1905  * @iwpbl: pble struct
1906  * @use_pbles: flag to use pble
1907  */
1908 static int
1909 irdma_handle_q_mem(struct irdma_device *iwdev,
1910 		   struct irdma_mem_reg_req *req,
1911 		   struct irdma_pbl *iwpbl, bool use_pbles)
1912 {
1913 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
1914 	struct irdma_mr *iwmr = iwpbl->iwmr;
1915 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
1916 	struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
1917 	struct irdma_hmc_pble *hmc_p;
1918 	u64 *arr = iwmr->pgaddrmem;
1919 	u32 pg_size, total;
1920 	int err = 0;
1921 	bool ret = true;
1922 
1923 	pg_size = iwmr->page_size;
1924 	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
1925 	if (err)
1926 		return err;
1927 
1928 	if (use_pbles)
1929 		arr = palloc->level1.addr;
1930 
1931 	switch (iwmr->type) {
1932 	case IRDMA_MEMREG_TYPE_QP:
1933 		total = req->sq_pages + req->rq_pages;
1934 		hmc_p = &qpmr->sq_pbl;
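		/* The shadow area page follows the SQ and RQ pages */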
1935 		qpmr->shadow = (dma_addr_t) arr[total];
1936 		if (use_pbles) {
1937 			ret = irdma_check_mem_contiguous(arr, req->sq_pages,
1938 							 pg_size);
1939 			if (ret)
1940 				ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
1941 								 req->rq_pages,
1942 								 pg_size);
1943 		}
1944 
1945 		if (!ret) {
1946 			hmc_p->idx = palloc->level1.idx;
1947 			hmc_p = &qpmr->rq_pbl;
1948 			hmc_p->idx = palloc->level1.idx + req->sq_pages;
1949 		} else {
1950 			hmc_p->addr = arr[0];
1951 			hmc_p = &qpmr->rq_pbl;
1952 			hmc_p->addr = arr[req->sq_pages];
1953 		}
1954 		break;
1955 	case IRDMA_MEMREG_TYPE_CQ:
1956 		hmc_p = &cqmr->cq_pbl;
1957 
1958 		if (!cqmr->split)
1959 			cqmr->shadow = (dma_addr_t) arr[req->cq_pages];
1960 
1961 		if (use_pbles)
1962 			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
1963 							 pg_size);
1964 
1965 		if (!ret)
1966 			hmc_p->idx = palloc->level1.idx;
1967 		else
1968 			hmc_p->addr = arr[0];
1969 		break;
1970 	default:
1971 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
1972 			    "MR type error\n");
1973 		err = -EINVAL;
1974 	}
1975 
1976 	if (use_pbles && ret) {
1977 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
1978 		iwpbl->pbl_allocated = false;
1979 	}
1980 
1981 	return err;
1982 }
1983 
1984 /**
1985  * irdma_hw_alloc_mw - create the hw memory window
1986  * @iwdev: irdma device
1987  * @iwmr: pointer to memory window info
1988  */
1989 int
1990 irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
1991 {
1992 	struct irdma_mw_alloc_info *info;
1993 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1994 	struct irdma_cqp_request *cqp_request;
1995 	struct cqp_cmds_info *cqp_info;
1996 	int status;
1997 
1998 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
1999 	if (!cqp_request)
2000 		return -ENOMEM;
2001 
2002 	cqp_info = &cqp_request->info;
2003 	info = &cqp_info->in.u.mw_alloc.info;
2004 	memset(info, 0, sizeof(*info));
2005 	if (iwmr->ibmw.type == IB_MW_TYPE_1)
2006 		info->mw_wide = true;
2007 
2008 	info->page_size = PAGE_SIZE;
2009 	info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2010 	info->pd_id = iwpd->sc_pd.pd_id;
2011 	info->remote_access = true;
2012 	cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2013 	cqp_info->post_sq = 1;
2014 	cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2015 	cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2016 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2017 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2018 
2019 	return status;
2020 }
2021 
2022 /**
2023  * irdma_dealloc_mw - Dealloc memory window
2024  * @ibmw: memory window structure.
2025  */
2026 static int
2027 irdma_dealloc_mw(struct ib_mw *ibmw)
2028 {
2029 	struct ib_pd *ibpd = ibmw->pd;
2030 	struct irdma_pd *iwpd = to_iwpd(ibpd);
2031 	struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2032 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
2033 	struct irdma_cqp_request *cqp_request;
2034 	struct cqp_cmds_info *cqp_info;
2035 	struct irdma_dealloc_stag_info *info;
2036 
2037 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2038 	if (!cqp_request)
2039 		return -ENOMEM;
2040 
2041 	cqp_info = &cqp_request->info;
2042 	info = &cqp_info->in.u.dealloc_stag.info;
2043 	memset(info, 0, sizeof(*info));
2044 	info->pd_id = iwpd->sc_pd.pd_id;
2045 	info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);
2046 	info->mr = false;
2047 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2048 	cqp_info->post_sq = 1;
2049 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2050 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2051 	irdma_handle_cqp_op(iwdev->rf, cqp_request);
2052 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2053 	irdma_free_stag(iwdev, iwmr->stag);
2054 	kfree(iwmr);
2055 
2056 	return 0;
2057 }
2058 
2059 /**
2060  * irdma_hw_alloc_stag - cqp command to allocate stag
2061  * @iwdev: irdma device
2062  * @iwmr: irdma mr pointer
2063  */
2064 int
2065 irdma_hw_alloc_stag(struct irdma_device *iwdev,
2066 		    struct irdma_mr *iwmr)
2067 {
2068 	struct irdma_allocate_stag_info *info;
2069 	struct ib_pd *pd = iwmr->ibmr.pd;
2070 	struct irdma_pd *iwpd = to_iwpd(pd);
2071 	struct irdma_cqp_request *cqp_request;
2072 	struct cqp_cmds_info *cqp_info;
2073 	int status;
2074 
2075 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2076 	if (!cqp_request)
2077 		return -ENOMEM;
2078 
2079 	cqp_info = &cqp_request->info;
2080 	info = &cqp_info->in.u.alloc_stag.info;
2081 	memset(info, 0, sizeof(*info));
2082 	info->page_size = PAGE_SIZE;
2083 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2084 	info->pd_id = iwpd->sc_pd.pd_id;
2085 	info->total_len = iwmr->len;
2086 	info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
2087 	info->remote_access = true;
2088 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2089 	cqp_info->post_sq = 1;
2090 	cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2091 	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2092 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2093 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2094 	if (!status)
2095 		iwmr->is_hwreg = 1;
2096 
2097 	return status;
2098 }
2099 
2100 /**
2101  * irdma_set_page - populate pbl list for fmr
2102  * @ibmr: ib mem to access iwarp mr pointer
2103  * @addr: page dma address for pbl list
2104  */
2105 static int
2106 irdma_set_page(struct ib_mr *ibmr, u64 addr)
2107 {
2108 	struct irdma_mr *iwmr = to_iwmr(ibmr);
2109 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2110 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2111 	u64 *pbl;
2112 
2113 	if (unlikely(iwmr->npages == iwmr->page_cnt))
2114 		return -ENOMEM;
2115 
2116 	if (palloc->level == PBLE_LEVEL_2) {
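		/*
		 * Level-2 PBL: pick the leaf covering this page index and
		 * store the address at its offset within that leaf.
		 */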
2117 		struct irdma_pble_info *palloc_info =
2118 		palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
2119 
2120 		palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
2121 	} else {
2122 		pbl = palloc->level1.addr;
2123 		pbl[iwmr->npages] = addr;
2124 	}
2125 
2126 	iwmr->npages++;
2127 	return 0;
2128 }
2129 
2130 /**
2131  * irdma_map_mr_sg - map sg list for fmr
2132  * @ibmr: ib mem to access iwarp mr pointer
2133  * @sg: scatter gather list
2134  * @sg_nents: number of sg entries
2135  * @sg_offset: offset into the first sg entry
2136  */
2137 static int
2138 irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2139 		int sg_nents, unsigned int *sg_offset)
2140 {
2141 	struct irdma_mr *iwmr = to_iwmr(ibmr);
2142 
2143 	iwmr->npages = 0;
2144 
2145 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
2146 }
2147 
2148 /**
2149  * irdma_hwreg_mr - send cqp command for memory registration
2150  * @iwdev: irdma device
2151  * @iwmr: irdma mr pointer
2152  * @access: access for MR
2153  */
2154 int
2155 irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2156 	       u16 access)
2157 {
2158 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2159 	struct irdma_reg_ns_stag_info *stag_info;
2160 	struct ib_pd *pd = iwmr->ibmr.pd;
2161 	struct irdma_pd *iwpd = to_iwpd(pd);
2162 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2163 	struct irdma_cqp_request *cqp_request;
2164 	struct cqp_cmds_info *cqp_info;
2165 	int ret;
2166 
2167 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2168 	if (!cqp_request)
2169 		return -ENOMEM;
2170 
2171 	cqp_info = &cqp_request->info;
2172 	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2173 	memset(stag_info, 0, sizeof(*stag_info));
2174 	stag_info->va = iwpbl->user_base;
2175 	stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2176 	stag_info->stag_key = (u8)iwmr->stag;
2177 	stag_info->total_len = iwmr->len;
2178 	stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
2179 	stag_info->access_rights = irdma_get_mr_access(access);
2180 	stag_info->pd_id = iwpd->sc_pd.pd_id;
2181 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2182 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2183 	else
2184 		stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2185 	stag_info->page_size = iwmr->page_size;
2186 
2187 	if (iwpbl->pbl_allocated) {
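		/*
		 * A level-1 PBL programs its index with chunk_size 1; a
		 * level-2 PBL programs its root index with chunk_size 3.
		 */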
2188 		if (palloc->level == PBLE_LEVEL_1) {
2189 			stag_info->first_pm_pbl_index = palloc->level1.idx;
2190 			stag_info->chunk_size = 1;
2191 		} else {
2192 			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2193 			stag_info->chunk_size = 3;
2194 		}
2195 	} else {
2196 		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2197 	}
2198 
2199 	cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2200 	cqp_info->post_sq = 1;
2201 	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2202 	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2203 	ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2204 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2205 
2206 	if (!ret)
2207 		iwmr->is_hwreg = 1;
2208 
2209 	return ret;
2210 }
2211 
2212 /**
2213  * irdma_reg_user_mr - Register a user memory region
2214  * @pd: ptr of pd
2215  * @start: virtual start address
2216  * @len: length of mr
2217  * @virt: virtual address
2218  * @access: access of mr
2219  * @udata: user data
2220  */
2221 static struct ib_mr *
2222 irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2223 		  u64 virt, int access,
2224 		  struct ib_udata *udata)
2225 {
2226 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
2227 	struct irdma_device *iwdev = to_iwdev(pd->device);
2228 	struct irdma_ucontext *ucontext;
2229 	struct irdma_pble_alloc *palloc;
2230 	struct irdma_pbl *iwpbl;
2231 	struct irdma_mr *iwmr;
2232 	struct ib_umem *region;
2233 	struct irdma_mem_reg_req req = {};
2234 	u32 total, stag = 0;
2235 	u8 shadow_pgcnt = 1;
2236 	bool use_pbles = false;
2237 	unsigned long flags;
2238 	int err = -EINVAL;
2239 	int ret;
2240 
2241 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2242 		return ERR_PTR(-EINVAL);
2243 
2244 	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
2245 		return ERR_PTR(-EINVAL);
2246 
2247 	region = ib_umem_get(pd->uobject->context, start, len, access, 0);
2248 
2249 	if (IS_ERR(region)) {
2250 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
2251 			    "Failed to create ib_umem region\n");
2252 		return (struct ib_mr *)region;
2253 	}
2254 
2255 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
2256 		ib_umem_release(region);
2257 		return ERR_PTR(-EFAULT);
2258 	}
2259 
2260 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2261 	if (!iwmr) {
2262 		ib_umem_release(region);
2263 		return ERR_PTR(-ENOMEM);
2264 	}
2265 
2266 	iwpbl = &iwmr->iwpbl;
2267 	iwpbl->iwmr = iwmr;
2268 	iwmr->region = region;
2269 	iwmr->ibmr.pd = pd;
2270 	iwmr->ibmr.device = pd->device;
2271 	iwmr->ibmr.iova = virt;
2272 	iwmr->page_size = IRDMA_HW_PAGE_SIZE;
2273 	iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1);
2274 
2275 	iwmr->len = region->length;
2276 	iwpbl->user_base = virt;
2277 	palloc = &iwpbl->pble_alloc;
2278 	iwmr->type = req.reg_type;
2279 	iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt);
2280 
2281 	switch (req.reg_type) {
2282 	case IRDMA_MEMREG_TYPE_QP:
2283 		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
2284 		if (total > iwmr->page_cnt) {
2285 			err = -EINVAL;
2286 			goto error;
2287 		}
2288 		total = req.sq_pages + req.rq_pages;
2289 		use_pbles = (total > 2);
2290 		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2291 		if (err)
2292 			goto error;
2293 
2294 #if __FreeBSD_version >= 1400026
2295 		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
2296 #else
2297 		ucontext = to_ucontext(pd->uobject->context);
2298 #endif
2299 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2300 		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2301 		iwpbl->on_list = true;
2302 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2303 		break;
2304 	case IRDMA_MEMREG_TYPE_CQ:
2305 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2306 			shadow_pgcnt = 0;
2307 		total = req.cq_pages + shadow_pgcnt;
2308 		if (total > iwmr->page_cnt) {
2309 			err = -EINVAL;
2310 			goto error;
2311 		}
2312 
2313 		use_pbles = (req.cq_pages > 1);
2314 		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2315 		if (err)
2316 			goto error;
2317 
2318 #if __FreeBSD_version >= 1400026
2319 		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
2320 #else
2321 		ucontext = to_ucontext(pd->uobject->context);
2322 #endif
2323 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2324 		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2325 		iwpbl->on_list = true;
2326 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2327 		break;
2328 	case IRDMA_MEMREG_TYPE_MEM:
2329 		use_pbles = (iwmr->page_cnt != 1);
2330 
2331 		err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
2332 		if (err)
2333 			goto error;
2334 
2335 		if (use_pbles) {
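			/*
			 * If the region is physically contiguous, the pbles
			 * are not needed; free them and register the MR by
			 * its base physical address instead.
			 */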
2336 			ret = irdma_check_mr_contiguous(palloc,
2337 							iwmr->page_size);
2338 			if (ret) {
2339 				irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2340 				iwpbl->pbl_allocated = false;
2341 			}
2342 		}
2343 
2344 		stag = irdma_create_stag(iwdev);
2345 		if (!stag) {
2346 			err = -ENOMEM;
2347 			goto error;
2348 		}
2349 
2350 		iwmr->stag = stag;
2351 		iwmr->ibmr.rkey = stag;
2352 		iwmr->ibmr.lkey = stag;
2353 		iwmr->access = access;
2354 		err = irdma_hwreg_mr(iwdev, iwmr, access);
2355 		if (err) {
2356 			irdma_free_stag(iwdev, stag);
2357 			goto error;
2358 		}
2359 
2360 		break;
2361 	default:
2362 		goto error;
2363 	}
2364 
2365 	iwmr->type = req.reg_type;
2366 
2367 	return &iwmr->ibmr;
2368 
2369 error:
2370 	if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2371 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2372 	ib_umem_release(region);
2373 	kfree(iwmr);
2374 
2375 	return ERR_PTR(err);
2376 }
2377 
2378 int
2379 irdma_hwdereg_mr(struct ib_mr *ib_mr)
2380 {
2381 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
2382 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
2383 	struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
2384 	struct irdma_dealloc_stag_info *info;
2385 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2386 	struct irdma_cqp_request *cqp_request;
2387 	struct cqp_cmds_info *cqp_info;
2388 	int status;
2389 
2390 	/*
2391 	 * Skip the HW MR de-register when it was already de-registered during
2392 	 * an MR re-register and the re-registration failed.
2393 	 */
2394 	if (!iwmr->is_hwreg)
2395 		return 0;
2396 
2397 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2398 	if (!cqp_request)
2399 		return -ENOMEM;
2400 
2401 	cqp_info = &cqp_request->info;
2402 	info = &cqp_info->in.u.dealloc_stag.info;
2403 	memset(info, 0, sizeof(*info));
2404 	info->pd_id = iwpd->sc_pd.pd_id;
2405 	info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S);
2406 	info->mr = true;
2407 	if (iwpbl->pbl_allocated)
2408 		info->dealloc_pbl = true;
2409 
2410 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2411 	cqp_info->post_sq = 1;
2412 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2413 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2414 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2415 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2416 
2417 	if (!status)
2418 		iwmr->is_hwreg = 0;
2419 
2420 	return status;
2421 }
2422 
2423 /**
2424  * irdma_rereg_mr_trans - Re-register a user MR for a change translation
2425  * @iwmr: ptr of iwmr
2426  * @start: virtual start address
2427  * @len: length of mr
2428  * @virt: virtual address
2429  * @udata: user data
 *
 * Re-register a user memory region when a change translation is requested.
 * Re-register a new region while reusing the stag from the original
 * registration.
 */
2430 struct ib_mr *
2431 irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
2432 		     u64 virt, struct ib_udata *udata)
2433 {
2434 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2435 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2436 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2437 	struct ib_pd *pd = iwmr->ibmr.pd;
2438 	struct ib_umem *region;
2439 	bool use_pbles;
2440 	int err;
2441 
2442 	region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0);
2443 
2444 	if (IS_ERR(region)) {
2445 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
2446 			    "Failed to create ib_umem region\n");
2447 		return (struct ib_mr *)region;
2448 	}
2449 
2450 	iwmr->region = region;
2451 	iwmr->ibmr.iova = virt;
2452 	iwmr->ibmr.pd = pd;
2453 	iwmr->page_size = PAGE_SIZE;
2454 
2455 	iwmr->len = region->length;
2456 	iwpbl->user_base = virt;
2457 	iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size,
2458 						      virt);
2459 
2460 	use_pbles = (iwmr->page_cnt != 1);
2461 
2462 	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
2463 	if (err)
2464 		goto error;
2465 
2466 	if (use_pbles) {
2467 		err = irdma_check_mr_contiguous(palloc,
2468 						iwmr->page_size);
2469 		if (err) {
2470 			irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2471 			iwpbl->pbl_allocated = false;
2472 		}
2473 	}
2474 
2475 	err = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
2476 	if (err)
2477 		goto error;
2478 
2479 	return &iwmr->ibmr;
2480 
2481 error:
2482 	if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) {
2483 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2484 		iwpbl->pbl_allocated = false;
2485 	}
2486 	ib_umem_release(region);
2487 	iwmr->region = NULL;
2488 
2489 	return ERR_PTR(err);
2490 }
2491 
2492 /**
2493  * irdma_reg_phys_mr - register kernel physical memory
2494  * @pd: ibpd pointer
2495  * @addr: physical address of memory to register
2496  * @size: size of memory to register
2497  * @access: Access rights
2498  * @iova_start: start of virtual address for physical buffers
2499  */
2500 struct ib_mr *
2501 irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
2502 		  u64 *iova_start)
2503 {
2504 	struct irdma_device *iwdev = to_iwdev(pd->device);
2505 	struct irdma_pbl *iwpbl;
2506 	struct irdma_mr *iwmr;
2507 	u32 stag;
2508 	int ret;
2509 
2510 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2511 	if (!iwmr)
2512 		return ERR_PTR(-ENOMEM);
2513 
2514 	iwmr->ibmr.pd = pd;
2515 	iwmr->ibmr.device = pd->device;
2516 	iwpbl = &iwmr->iwpbl;
2517 	iwpbl->iwmr = iwmr;
2518 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2519 	iwpbl->user_base = *iova_start;
2520 	stag = irdma_create_stag(iwdev);
2521 	if (!stag) {
2522 		ret = -ENOMEM;
2523 		goto err;
2524 	}
2525 
2526 	iwmr->stag = stag;
2527 	iwmr->ibmr.iova = *iova_start;
2528 	iwmr->ibmr.rkey = stag;
2529 	iwmr->ibmr.lkey = stag;
2530 	iwmr->page_cnt = 1;
2531 	iwmr->pgaddrmem[0] = addr;
2532 	iwmr->len = size;
2533 	iwmr->page_size = SZ_4K;
2534 	ret = irdma_hwreg_mr(iwdev, iwmr, access);
2535 	if (ret) {
2536 		irdma_free_stag(iwdev, stag);
2537 		goto err;
2538 	}
2539 
2540 	return &iwmr->ibmr;
2541 
2542 err:
2543 	kfree(iwmr);
2544 
2545 	return ERR_PTR(ret);
2546 }
2547 
2548 /**
2549  * irdma_get_dma_mr - register physical mem
2550  * @pd: ptr of pd
2551  * @acc: access for memory
2552  */
2553 static struct ib_mr *
2554 irdma_get_dma_mr(struct ib_pd *pd, int acc)
2555 {
2556 	u64 kva = 0;
2557 
2558 	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
2559 }
2560 
2561 /**
2562  * irdma_del_memlist - Delete pbl list entries for CQ/QP
2563  * @iwmr: iwmr for IB's user page addresses
2564  * @ucontext: ptr to user context
2565  */
2566 void
2567 irdma_del_memlist(struct irdma_mr *iwmr,
2568 		  struct irdma_ucontext *ucontext)
2569 {
2570 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2571 	unsigned long flags;
2572 
2573 	switch (iwmr->type) {
2574 	case IRDMA_MEMREG_TYPE_CQ:
2575 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2576 		if (iwpbl->on_list) {
2577 			iwpbl->on_list = false;
2578 			list_del(&iwpbl->list);
2579 		}
2580 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2581 		break;
2582 	case IRDMA_MEMREG_TYPE_QP:
2583 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2584 		if (iwpbl->on_list) {
2585 			iwpbl->on_list = false;
2586 			list_del(&iwpbl->list);
2587 		}
2588 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2589 		break;
2590 	default:
2591 		break;
2592 	}
2593 }
2594 
2595 /**
2596  * irdma_copy_sg_list - copy sg list for qp
2597  * @sg_list: destination irdma sg list
2598  * @sgl: source ib sg list to copy from
2599  * @num_sges: count of sg entries
2600  */
2601 static void
2602 irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
2603 		   int num_sges)
2604 {
2605 	unsigned int i;
2606 
2607 	for (i = 0; i < num_sges; i++) {
2608 		sg_list[i].tag_off = sgl[i].addr;
2609 		sg_list[i].len = sgl[i].length;
2610 		sg_list[i].stag = sgl[i].lkey;
2611 	}
2612 }
2613 
2614 /**
2615  * irdma_post_send - post send work requests for kernel application
2616  * @ibqp: qp ptr for wr
2617  * @ib_wr: work request ptr
2618  * @bad_wr: return of bad wr if err
2619  */
2620 static int
2621 irdma_post_send(struct ib_qp *ibqp,
2622 		const struct ib_send_wr *ib_wr,
2623 		const struct ib_send_wr **bad_wr)
2624 {
2625 	struct irdma_qp *iwqp;
2626 	struct irdma_qp_uk *ukqp;
2627 	struct irdma_sc_dev *dev;
2628 	struct irdma_post_sq_info info;
2629 	int err = 0;
2630 	unsigned long flags;
2631 	bool inv_stag;
2632 	struct irdma_ah *ah;
2633 
2634 	iwqp = to_iwqp(ibqp);
2635 	ukqp = &iwqp->sc_qp.qp_uk;
2636 	dev = &iwqp->iwdev->rf->sc_dev;
2637 
2638 	spin_lock_irqsave(&iwqp->lock, flags);
2639 	while (ib_wr) {
2640 		memset(&info, 0, sizeof(info));
2641 		inv_stag = false;
2642 		info.wr_id = (ib_wr->wr_id);
2643 		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2644 			info.signaled = true;
2645 		if (ib_wr->send_flags & IB_SEND_FENCE)
2646 			info.read_fence = true;
2647 		switch (ib_wr->opcode) {
2648 		case IB_WR_SEND_WITH_IMM:
2649 			if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
2650 				info.imm_data_valid = true;
2651 				info.imm_data = ntohl(ib_wr->ex.imm_data);
2652 			} else {
2653 				err = -EINVAL;
2654 				break;
2655 			}
2656 			/* fallthrough */
2657 		case IB_WR_SEND:
2658 		case IB_WR_SEND_WITH_INV:
2659 			if (ib_wr->opcode == IB_WR_SEND ||
2660 			    ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
2661 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
2662 					info.op_type = IRDMA_OP_TYPE_SEND_SOL;
2663 				else
2664 					info.op_type = IRDMA_OP_TYPE_SEND;
2665 			} else {
2666 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
2667 					info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
2668 				else
2669 					info.op_type = IRDMA_OP_TYPE_SEND_INV;
2670 				info.stag_to_inv = ib_wr->ex.invalidate_rkey;
2671 			}
2672 
2673 			info.op.send.num_sges = ib_wr->num_sge;
2674 			info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
2675 			if (iwqp->ibqp.qp_type == IB_QPT_UD ||
2676 			    iwqp->ibqp.qp_type == IB_QPT_GSI) {
2677 				ah = to_iwah(ud_wr(ib_wr)->ah);
2678 				info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
2679 				info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
2680 				info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
2681 			}
2682 
2683 			if (ib_wr->send_flags & IB_SEND_INLINE)
2684 				err = irdma_uk_inline_send(ukqp, &info, false);
2685 			else
2686 				err = irdma_uk_send(ukqp, &info, false);
2687 			break;
2688 		case IB_WR_RDMA_WRITE_WITH_IMM:
2689 			if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
2690 				info.imm_data_valid = true;
2691 				info.imm_data = ntohl(ib_wr->ex.imm_data);
2692 			} else {
2693 				err = -EINVAL;
2694 				break;
2695 			}
2696 			/* fallthrough */
2697 		case IB_WR_RDMA_WRITE:
2698 			if (ib_wr->send_flags & IB_SEND_SOLICITED)
2699 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
2700 			else
2701 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
2702 
2703 			info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2704 			info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2705 			info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2706 			info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2707 			if (ib_wr->send_flags & IB_SEND_INLINE)
2708 				err = irdma_uk_inline_rdma_write(ukqp, &info, false);
2709 			else
2710 				err = irdma_uk_rdma_write(ukqp, &info, false);
2711 			break;
2712 		case IB_WR_RDMA_READ_WITH_INV:
2713 			inv_stag = true;
2714 			/* fallthrough */
2715 		case IB_WR_RDMA_READ:
2716 			if (ib_wr->num_sge >
2717 			    dev->hw_attrs.uk_attrs.max_hw_read_sges) {
2718 				err = -EINVAL;
2719 				break;
2720 			}
2721 			info.op_type = IRDMA_OP_TYPE_RDMA_READ;
2722 			info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2723 			info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2724 			info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
2725 			info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
2726 			err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
2727 			break;
2728 		case IB_WR_LOCAL_INV:
2729 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
2730 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2731 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
2732 			break;
2733 		case IB_WR_REG_MR:{
2734 				struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
2735 				struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2736 				struct irdma_fast_reg_stag_info stag_info = {0};
2737 
2738 				stag_info.signaled = info.signaled;
2739 				stag_info.read_fence = info.read_fence;
2740 				stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
2741 				stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
2742 				stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
2743 				stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
2744 				stag_info.wr_id = ib_wr->wr_id;
2745 				stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2746 				stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2747 				stag_info.total_len = iwmr->ibmr.length;
2748 				if (palloc->level == PBLE_LEVEL_2) {
2749 					stag_info.chunk_size = 3;
2750 					stag_info.first_pm_pbl_index = palloc->level2.root.idx;
2751 				} else {
2752 					stag_info.chunk_size = 1;
2753 					stag_info.first_pm_pbl_index = palloc->level1.idx;
2754 				}
2755 				stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2756 				err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
2757 								true);
2758 				break;
2759 			}
2760 		default:
2761 			err = -EINVAL;
2762 			irdma_debug(iwdev_to_idev(iwqp->iwdev),
2763 				    IRDMA_DEBUG_VERBS,
2764 				    "upost_send bad opcode = 0x%x\n",
2765 				    ib_wr->opcode);
2766 			break;
2767 		}
2768 
2769 		if (err)
2770 			break;
2771 		ib_wr = ib_wr->next;
2772 	}
2773 
2774 	if (!iwqp->flush_issued) {
2775 		if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
2776 			irdma_uk_qp_post_wr(ukqp);
2777 		spin_unlock_irqrestore(&iwqp->lock, flags);
2778 	} else {
2779 		spin_unlock_irqrestore(&iwqp->lock, flags);
2780 		irdma_sched_qp_flush_work(iwqp);
2781 	}
2782 	if (err)
2783 		*bad_wr = ib_wr;
2784 
2785 	return err;
2786 }
2787 
2788 /**
2789  * irdma_post_recv - post receive wr for kernel application
2790  * @ibqp: ib qp pointer
2791  * @ib_wr: work request for receive
2792  * @bad_wr: bad wr caused an error
2793  */
2794 static int
2795 irdma_post_recv(struct ib_qp *ibqp,
2796 		const struct ib_recv_wr *ib_wr,
2797 		const struct ib_recv_wr **bad_wr)
2798 {
2799 	struct irdma_qp *iwqp = to_iwqp(ibqp);
2800 	struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk;
2801 	struct irdma_post_rq_info post_recv = {0};
2802 	struct irdma_sge *sg_list = iwqp->sg_list;
2803 	unsigned long flags;
2804 	int err = 0;
2805 
2806 	spin_lock_irqsave(&iwqp->lock, flags);
2807 
2808 	while (ib_wr) {
2809 		if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) {
2810 			err = -EINVAL;
2811 			goto out;
2812 		}
2813 		post_recv.num_sges = ib_wr->num_sge;
2814 		post_recv.wr_id = ib_wr->wr_id;
2815 		irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
2816 		post_recv.sg_list = sg_list;
2817 		err = irdma_uk_post_receive(ukqp, &post_recv);
2818 		if (err) {
2819 			irdma_debug(iwdev_to_idev(iwqp->iwdev),
2820 				    IRDMA_DEBUG_VERBS, "post_recv err %d\n",
2821 				    err);
2822 			goto out;
2823 		}
2824 
2825 		ib_wr = ib_wr->next;
2826 	}
2827 
2828 out:
2829 	spin_unlock_irqrestore(&iwqp->lock, flags);
2830 	if (iwqp->flush_issued)
2831 		irdma_sched_qp_flush_work(iwqp);
2832 	if (err)
2833 		*bad_wr = ib_wr;
2834 
2835 	return err;
2836 }
2837 
2838 /**
2839  * irdma_flush_err_to_ib_wc_status - return change flush error code to IB status
2840  * @opcode: iwarp flush code
2841  */
2842 static enum ib_wc_status
2843 irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
2844 {
2845 	switch (opcode) {
2846 	case FLUSH_PROT_ERR:
2847 		return IB_WC_LOC_PROT_ERR;
2848 	case FLUSH_REM_ACCESS_ERR:
2849 		return IB_WC_REM_ACCESS_ERR;
2850 	case FLUSH_LOC_QP_OP_ERR:
2851 		return IB_WC_LOC_QP_OP_ERR;
2852 	case FLUSH_REM_OP_ERR:
2853 		return IB_WC_REM_OP_ERR;
2854 	case FLUSH_LOC_LEN_ERR:
2855 		return IB_WC_LOC_LEN_ERR;
2856 	case FLUSH_GENERAL_ERR:
2857 		return IB_WC_WR_FLUSH_ERR;
2858 	case FLUSH_MW_BIND_ERR:
2859 		return IB_WC_MW_BIND_ERR;
2860 	case FLUSH_REM_INV_REQ_ERR:
2861 		return IB_WC_REM_INV_REQ_ERR;
2862 	case FLUSH_RETRY_EXC_ERR:
2863 		return IB_WC_RETRY_EXC_ERR;
2864 	case FLUSH_FATAL_ERR:
2865 	default:
2866 		return IB_WC_FATAL_ERR;
2867 	}
2868 }
2869 
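/**
 * set_ib_wc_op_sq - set the ib_wc opcode for an SQ completion
 * @cq_poll_info: completion info from the CQE
 * @entry: ib_wc entry to fill
 */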
2870 static inline void
2871 set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
2872 		struct ib_wc *entry)
2873 {
2874 	struct irdma_sc_qp *qp;
2875 
2876 	switch (cq_poll_info->op_type) {
2877 	case IRDMA_OP_TYPE_RDMA_WRITE:
2878 	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
2879 		entry->opcode = IB_WC_RDMA_WRITE;
2880 		break;
2881 	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
2882 	case IRDMA_OP_TYPE_RDMA_READ:
2883 		entry->opcode = IB_WC_RDMA_READ;
2884 		break;
2885 	case IRDMA_OP_TYPE_SEND_SOL:
2886 	case IRDMA_OP_TYPE_SEND_SOL_INV:
2887 	case IRDMA_OP_TYPE_SEND_INV:
2888 	case IRDMA_OP_TYPE_SEND:
2889 		entry->opcode = IB_WC_SEND;
2890 		break;
2891 	case IRDMA_OP_TYPE_FAST_REG_NSMR:
2892 		entry->opcode = IB_WC_REG_MR;
2893 		break;
2894 	case IRDMA_OP_TYPE_INV_STAG:
2895 		entry->opcode = IB_WC_LOCAL_INV;
2896 		break;
2897 	default:
2898 		qp = cq_poll_info->qp_handle;
2899 		ibdev_err(irdma_get_ibdev(qp->dev), "Invalid opcode = %d in CQE\n",
2900 			  cq_poll_info->op_type);
2901 		entry->status = IB_WC_GENERAL_ERR;
2902 	}
2903 }
2904 
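/**
 * set_ib_wc_op_rq - set the ib_wc opcode for an RQ completion
 * @cq_poll_info: completion info from the CQE
 * @entry: ib_wc entry to fill
 * @send_imm_support: true if the QP supports send with immediate
 */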
2905 static inline void
2906 set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
2907 		struct ib_wc *entry, bool send_imm_support)
2908 {
2909 	/**
2910 	 * iWARP does not support sendImm, so the presence of Imm data
2911 	 * must be WriteImm.
2912 	 */
2913 	if (!send_imm_support) {
2914 		entry->opcode = cq_poll_info->imm_valid ?
2915 		    IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
2916 		return;
2917 	}
2918 	switch (cq_poll_info->op_type) {
2919 	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
2920 	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
2921 		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2922 		break;
2923 	default:
2924 		entry->opcode = IB_WC_RECV;
2925 	}
2926 }
2927 
2928 /**
2929  * irdma_process_cqe - process cqe info
2930  * @entry: processed cqe
2931  * @cq_poll_info: cqe info
2932  */
2933 static void
2934 irdma_process_cqe(struct ib_wc *entry,
2935 		  struct irdma_cq_poll_info *cq_poll_info)
2936 {
2937 	struct irdma_sc_qp *qp;
2938 
2939 	entry->wc_flags = 0;
2940 	entry->pkey_index = 0;
2941 	entry->wr_id = cq_poll_info->wr_id;
2942 
2943 	qp = cq_poll_info->qp_handle;
2944 	entry->qp = qp->qp_uk.back_qp;
2945 
2946 	if (cq_poll_info->error) {
2947 		entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
2948 		    irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
2949 
2950 		entry->vendor_err = cq_poll_info->major_err << 16 |
2951 		    cq_poll_info->minor_err;
2952 	} else {
2953 		entry->status = IB_WC_SUCCESS;
2954 		if (cq_poll_info->imm_valid) {
2955 			entry->ex.imm_data = htonl(cq_poll_info->imm_data);
2956 			entry->wc_flags |= IB_WC_WITH_IMM;
2957 		}
2958 		if (cq_poll_info->ud_smac_valid) {
2959 			ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
2960 			entry->wc_flags |= IB_WC_WITH_SMAC;
2961 		}
2962 
2963 		if (cq_poll_info->ud_vlan_valid) {
2964 			u16 vlan = cq_poll_info->ud_vlan & EVL_VLID_MASK;
2965 
2966 			entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
2967 			if (vlan) {
2968 				entry->vlan_id = vlan;
2969 				entry->wc_flags |= IB_WC_WITH_VLAN;
2970 			}
2971 		} else {
2972 			entry->sl = 0;
2973 		}
2974 	}
2975 
2976 	if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
2977 		set_ib_wc_op_sq(cq_poll_info, entry);
2978 	} else {
2979 		set_ib_wc_op_rq(cq_poll_info, entry,
2980 				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
2981 				true : false);
2982 		if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
2983 		    cq_poll_info->stag_invalid_set) {
2984 			entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
2985 			entry->wc_flags |= IB_WC_WITH_INVALIDATE;
2986 		}
2987 	}
2988 
2989 	if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
2990 		entry->src_qp = cq_poll_info->ud_src_qpn;
2991 		entry->slid = 0;
2992 		entry->wc_flags |=
2993 		    (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
2994 		entry->network_hdr_type = cq_poll_info->ipv4 ?
2995 		    RDMA_NETWORK_IPV4 :
2996 		    RDMA_NETWORK_IPV6;
2997 	} else {
2998 		entry->src_qp = cq_poll_info->qp_id;
2999 	}
3000 
3001 	entry->byte_len = cq_poll_info->bytes_xfered;
3002 }
3003 
3004 /**
3005  * irdma_poll_one - poll one entry of the CQ
3006  * @ukcq: ukcq to poll
3007  * @cur_cqe: current CQE info to be filled in
3008  * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
3009  *
3010  * Returns the internal irdma device error code or 0 on success
3011  */
3012 static inline int
3013 irdma_poll_one(struct irdma_cq_uk *ukcq,
3014 	       struct irdma_cq_poll_info *cur_cqe,
3015 	       struct ib_wc *entry)
3016 {
3017 	int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3018 
3019 	if (ret)
3020 		return ret;
3021 
3022 	irdma_process_cqe(entry, cur_cqe);
3023 
3024 	return 0;
3025 }
3026 
3027 /**
3028  * __irdma_poll_cq - poll cq for completion (kernel apps)
3029  * @iwcq: cq to poll
3030  * @num_entries: number of entries to poll
3031  * @entry: wr of a completed entry
3032  */
3033 static int
3034 __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3035 {
3036 	struct list_head *tmp_node, *list_node;
3037 	struct irdma_cq_buf *last_buf = NULL;
3038 	struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3039 	struct irdma_cq_buf *cq_buf;
3040 	int ret;
3041 	struct irdma_device *iwdev;
3042 	struct irdma_cq_uk *ukcq;
3043 	bool cq_new_cqe = false;
3044 	int resized_bufs = 0;
3045 	int npolled = 0;
3046 
3047 	iwdev = to_iwdev(iwcq->ibcq.device);
3048 	ukcq = &iwcq->sc_cq.cq_uk;
3049 
3050 	/* go through the list of previously resized CQ buffers */
3051 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3052 		cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3053 		while (npolled < num_entries) {
3054 			ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3055 			if (!ret) {
3056 				++npolled;
3057 				cq_new_cqe = true;
3058 				continue;
3059 			}
3060 			if (ret == -ENOENT)
3061 				break;
3062 			/* QP using the CQ is destroyed. Skip reporting this CQE */
3063 			if (ret == -EFAULT) {
3064 				cq_new_cqe = true;
3065 				continue;
3066 			}
3067 			goto error;
3068 		}
3069 
3070 		/* save the resized CQ buffer which received the last cqe */
3071 		if (cq_new_cqe)
3072 			last_buf = cq_buf;
3073 		cq_new_cqe = false;
3074 	}
3075 
3076 	/* check the current CQ for new cqes */
3077 	while (npolled < num_entries) {
3078 		ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3079 		if (ret == -ENOENT) {
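			/* No HW CQE; check for completions generated in software for flushed QPs */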
3080 			ret = irdma_generated_cmpls(iwcq, cur_cqe);
3081 			if (!ret)
3082 				irdma_process_cqe(entry + npolled, cur_cqe);
3083 		}
3084 		if (!ret) {
3085 			++npolled;
3086 			cq_new_cqe = true;
3087 			continue;
3088 		}
3089 
3090 		if (ret == -ENOENT)
3091 			break;
3092 		/* QP using the CQ is destroyed. Skip reporting this CQE */
3093 		if (ret == -EFAULT) {
3094 			cq_new_cqe = true;
3095 			continue;
3096 		}
3097 		goto error;
3098 	}
3099 
3100 	if (cq_new_cqe)
3101 		/* all previous CQ resizes are complete */
3102 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3103 	else if (last_buf)
3104 		/* only CQ resizes up to the last_buf are complete */
3105 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3106 	if (resized_bufs)
3107 		/* report to the HW the number of complete CQ resizes */
3108 		irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3109 
3110 	return npolled;
3111 error:
3112 	irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3113 		    "%s: Error polling CQ, irdma_err: %d\n", __func__, ret);
3114 
3115 	return ret;
3116 }
3117 
3118 /**
3119  * irdma_poll_cq - poll cq for completion (kernel apps)
3120  * @ibcq: cq to poll
3121  * @num_entries: number of entries to poll
3122  * @entry: wr of a completed entry
3123  */
3124 static int
3125 irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3126 	      struct ib_wc *entry)
3127 {
3128 	struct irdma_cq *iwcq;
3129 	unsigned long flags;
3130 	int ret;
3131 
3132 	iwcq = to_iwcq(ibcq);
3133 
3134 	spin_lock_irqsave(&iwcq->lock, flags);
3135 	ret = __irdma_poll_cq(iwcq, num_entries, entry);
3136 	spin_unlock_irqrestore(&iwcq->lock, flags);
3137 
3138 	return ret;
3139 }
3140 
3141 /**
3142  * irdma_req_notify_cq - arm cq for kernel application
3143  * @ibcq: cq to arm
3144  * @notify_flags: notification flags
3145  */
3146 static int
3147 irdma_req_notify_cq(struct ib_cq *ibcq,
3148 		    enum ib_cq_notify_flags notify_flags)
3149 {
3150 	struct irdma_cq *iwcq;
3151 	struct irdma_cq_uk *ukcq;
3152 	unsigned long flags;
3153 	enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
3154 	bool promo_event = false;
3155 	int ret = 0;
3156 
3157 	iwcq = to_iwcq(ibcq);
3158 	ukcq = &iwcq->sc_cq.cq_uk;
3159 
3160 	spin_lock_irqsave(&iwcq->lock, flags);
3161 	if (notify_flags == IB_CQ_SOLICITED) {
3162 		cq_notify = IRDMA_CQ_COMPL_SOLICITED;
3163 	} else {
3164 		if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED)
3165 			promo_event = true;
3166 	}
3167 
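	/*
	 * Arm only if the CQ is not already armed, or to promote a
	 * solicited-only arm to notify on any completion.
	 */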
3168 	if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
3169 		iwcq->last_notify = cq_notify;
3170 		irdma_uk_cq_request_notification(ukcq, cq_notify);
3171 	}
3172 
3173 	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3174 	    (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
3175 		ret = 1;
3176 	spin_unlock_irqrestore(&iwcq->lock, flags);
3177 
3178 	return ret;
3179 }
3180 
3181 /**
3182  * mcast_list_add - Add a new mcast item to list
3183  * @rf: RDMA PCI function
3184  * @new_elem: pointer to element to add
3185  */
3186 static void
3187 mcast_list_add(struct irdma_pci_f *rf,
3188 	       struct mc_table_list *new_elem)
3189 {
3190 	list_add(&new_elem->list, &rf->mc_qht_list.list);
3191 }
3192 
3193 /**
3194  * mcast_list_del - Remove an mcast item from list
3195  * @mc_qht_elem: pointer to mcast table list element
3196  */
3197 static void
3198 mcast_list_del(struct mc_table_list *mc_qht_elem)
3199 {
3200 	if (mc_qht_elem)
3201 		list_del(&mc_qht_elem->list);
3202 }
3203 
3204 /**
3205  * mcast_list_lookup_ip - Search mcast list for address
3206  * @rf: RDMA PCI function
3207  * @ip_mcast: pointer to mcast IP address
3208  */
3209 static struct mc_table_list *
3210 mcast_list_lookup_ip(struct irdma_pci_f *rf,
3211 		     u32 *ip_mcast)
3212 {
3213 	struct mc_table_list *mc_qht_el;
3214 	struct list_head *pos, *q;
3215 
3216 	list_for_each_safe(pos, q, &rf->mc_qht_list.list) {
3217 		mc_qht_el = list_entry(pos, struct mc_table_list, list);
3218 		if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3219 			    sizeof(mc_qht_el->mc_info.dest_ip)))
3220 			return mc_qht_el;
3221 	}
3222 
3223 	return NULL;
3224 }
3225 
3226 /**
3227  * irdma_mcast_cqp_op - perform a mcast cqp operation
3228  * @iwdev: irdma device
3229  * @mc_grp_ctx: mcast group info
3230  * @op: operation
3231  *
3232  * returns error status
3233  */
3234 static int
3235 irdma_mcast_cqp_op(struct irdma_device *iwdev,
3236 		   struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3237 {
3238 	struct cqp_cmds_info *cqp_info;
3239 	struct irdma_cqp_request *cqp_request;
3240 	int status;
3241 
3242 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3243 	if (!cqp_request)
3244 		return -ENOMEM;
3245 
3246 	cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3247 	cqp_info = &cqp_request->info;
3248 	cqp_info->cqp_cmd = op;
3249 	cqp_info->post_sq = 1;
3250 	cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3251 	cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3252 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3253 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3254 
3255 	return status;
3256 }
3257 
3258 /**
3259  * irdma_attach_mcast - attach a qp to a multicast group
3260  * @ibqp: ptr to qp
3261  * @ibgid: pointer to global ID
3262  * @lid: local ID
3263  *
3264  * returns error status
3265  */
3266 static int
3267 irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3268 {
3269 	struct irdma_qp *iwqp = to_iwqp(ibqp);
3270 	struct irdma_device *iwdev = iwqp->iwdev;
3271 	struct irdma_pci_f *rf = iwdev->rf;
3272 	struct mc_table_list *mc_qht_elem;
3273 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
3274 	unsigned long flags;
3275 	u32 ip_addr[4] = {0};
3276 	u32 mgn;
3277 	u32 no_mgs;
3278 	int ret = 0;
3279 	bool ipv4;
3280 	u16 vlan_id;
3281 	union {
3282 		struct sockaddr saddr;
3283 		struct sockaddr_in saddr_in;
3284 		struct sockaddr_in6 saddr_in6;
3285 	} sgid_addr;
3286 	unsigned char dmac[ETH_ALEN];
3287 
3288 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3289 
3290 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
3291 		irdma_copy_ip_ntohl(ip_addr,
3292 				    sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
3293 		irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
3294 		ipv4 = false;
3295 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3296 			    "qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
3297 			    ip_addr);
3298 		irdma_mcast_mac_v6(ip_addr, dmac);
3299 	} else {
3300 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3301 		ipv4 = true;
3302 		vlan_id = irdma_get_vlan_ipv4(ip_addr);
3303 		irdma_mcast_mac_v4(ip_addr, dmac);
3304 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3305 			    "qp_id=%d, IP4address=%pI4, MAC=%pM\n",
3306 			    ibqp->qp_num, ip_addr, dmac);
3307 	}
3308 
3309 	spin_lock_irqsave(&rf->qh_list_lock, flags);
3310 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3311 	if (!mc_qht_elem) {
3312 		struct irdma_dma_mem *dma_mem_mc;
3313 
3314 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3315 		mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
3316 		if (!mc_qht_elem)
3317 			return -ENOMEM;
3318 
3319 		mc_qht_elem->mc_info.ipv4_valid = ipv4;
3320 		memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
3321 		       sizeof(mc_qht_elem->mc_info.dest_ip));
3322 		ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
3323 				       &mgn, &rf->next_mcg);
3324 		if (ret) {
3325 			kfree(mc_qht_elem);
3326 			return -ENOMEM;
3327 		}
3328 
3329 		mc_qht_elem->mc_info.mgn = mgn;
3330 		dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
3331 		dma_mem_mc->size = sizeof(u64)* IRDMA_MAX_MGS_PER_CTX;
3332 		dma_mem_mc->va = irdma_allocate_dma_mem(&rf->hw, dma_mem_mc,
3333 							dma_mem_mc->size,
3334 							IRDMA_HW_PAGE_SIZE);
3335 		if (!dma_mem_mc->va) {
3336 			irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
3337 			kfree(mc_qht_elem);
3338 			return -ENOMEM;
3339 		}
3340 
3341 		mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
3342 		memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
3343 		       sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
3344 		mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
3345 		mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
3346 		if (vlan_id < VLAN_N_VID)
3347 			mc_qht_elem->mc_grp_ctx.vlan_valid = true;
3348 		mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
3349 		mc_qht_elem->mc_grp_ctx.qs_handle =
3350 		    iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
3351 		ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
3352 
3353 		spin_lock_irqsave(&rf->qh_list_lock, flags);
3354 		mcast_list_add(rf, mc_qht_elem);
3355 	} else {
3356 		if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
3357 		    IRDMA_MAX_MGS_PER_CTX) {
3358 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3359 			return -ENOMEM;
3360 		}
3361 	}
3362 
3363 	mcg_info.qp_id = iwqp->ibqp.qp_num;
3364 	no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
3365 	irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3366 	spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3367 
3368 	/* Only if there is a change do we need to modify or create */
3369 	if (!no_mgs) {
3370 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3371 					 IRDMA_OP_MC_CREATE);
3372 	} else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3373 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3374 					 IRDMA_OP_MC_MODIFY);
3375 	} else {
3376 		return 0;
3377 	}
3378 
3379 	if (ret)
3380 		goto error;
3381 
3382 	return 0;
3383 
3384 error:
3385 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3386 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3387 		mcast_list_del(mc_qht_elem);
3388 		irdma_free_dma_mem(&rf->hw,
3389 				   &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
3390 		irdma_free_rsrc(rf, rf->allocated_mcgs,
3391 				mc_qht_elem->mc_grp_ctx.mg_id);
3392 		kfree(mc_qht_elem);
3393 	}
3394 
3395 	return ret;
3396 }
3397 
3398 /**
3399  * irdma_detach_mcast - detach a qp from a multicast group
3400  * @ibqp: ptr to qp
3401  * @ibgid: pointer to global ID
3402  * @lid: local ID
3403  *
3404  * returns error status
3405  */
3406 static int
3407 irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3408 {
3409 	struct irdma_qp *iwqp = to_iwqp(ibqp);
3410 	struct irdma_device *iwdev = iwqp->iwdev;
3411 	struct irdma_pci_f *rf = iwdev->rf;
3412 	u32 ip_addr[4] = {0};
3413 	struct mc_table_list *mc_qht_elem;
3414 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {0};
3415 	int ret;
3416 	unsigned long flags;
3417 	union {
3418 		struct sockaddr saddr;
3419 		struct sockaddr_in saddr_in;
3420 		struct sockaddr_in6 saddr_in6;
3421 	} sgid_addr;
3422 
3423 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3424 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
3425 		irdma_copy_ip_ntohl(ip_addr,
3426 				    sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
3427 	else
3428 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3429 
3430 	spin_lock_irqsave(&rf->qh_list_lock, flags);
3431 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3432 	if (!mc_qht_elem) {
3433 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3434 		irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3435 			    "address not found MCG\n");
3436 		return 0;
3437 	}
3438 
3439 	mcg_info.qp_id = iwqp->ibqp.qp_num;
3440 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3441 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3442 		mcast_list_del(mc_qht_elem);
3443 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3444 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3445 					 IRDMA_OP_MC_DESTROY);
3446 		if (ret) {
3447 			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3448 				    "failed MC_DESTROY MCG\n");
3449 			spin_lock_irqsave(&rf->qh_list_lock, flags);
3450 			mcast_list_add(rf, mc_qht_elem);
3451 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3452 			return -EAGAIN;
3453 		}
3454 
3455 		irdma_free_dma_mem(&rf->hw,
3456 				   &mc_qht_elem->mc_grp_ctx.dma_mem_mc);
3457 		irdma_free_rsrc(rf, rf->allocated_mcgs,
3458 				mc_qht_elem->mc_grp_ctx.mg_id);
3459 		kfree(mc_qht_elem);
3460 	} else {
3461 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3462 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3463 					 IRDMA_OP_MC_MODIFY);
3464 		if (ret) {
3465 			irdma_debug(iwdev_to_idev(iwdev), IRDMA_DEBUG_VERBS,
3466 				    "failed Modify MCG\n");
3467 			return ret;
3468 		}
3469 	}
3470 
3471 	return 0;
3472 }
3473 
3474 /**
3475  * irdma_query_ah - Query address handle
3476  * @ibah: pointer to address handle
3477  * @ah_attr: address handle attributes
3478  */
3479 static int
3480 irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
3481 {
3482 	struct irdma_ah *ah = to_iwah(ibah);
3483 
3484 	memset(ah_attr, 0, sizeof(*ah_attr));
3485 	if (ah->av.attrs.ah_flags & IB_AH_GRH) {
3486 		ah_attr->ah_flags = IB_AH_GRH;
3487 		ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
3488 		ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
3489 		ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
3490 		ah_attr->grh.sgid_index = ah->sgid_index;
3492 		memcpy(&ah_attr->grh.dgid, &ah->dgid,
3493 		       sizeof(ah_attr->grh.dgid));
3494 	}
3495 
3496 	return 0;
3497 }
3498 
3499 static __be64
irdma_mac_to_guid(struct ifnet *ndev)
{
3500 	const unsigned char *mac = IF_LLADDR(ndev);
3501 	__be64 guid;
3502 	unsigned char *dst = (unsigned char *)&guid;
3503 
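	/* Build an EUI-64 GUID from the MAC: flip the universal/local bit and insert FF:FE */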
3504 	dst[0] = mac[0] ^ 2;
3505 	dst[1] = mac[1];
3506 	dst[2] = mac[2];
3507 	dst[3] = 0xff;
3508 	dst[4] = 0xfe;
3509 	dst[5] = mac[3];
3510 	dst[6] = mac[4];
3511 	dst[7] = mac[5];
3512 
3513 	return guid;
3514 }
3515 
3516 static struct ifnet *
3517 irdma_get_netdev(struct ib_device *ibdev, u8 port_num)
3518 {
3519 	struct irdma_device *iwdev = to_iwdev(ibdev);
3520 
3521 	if (iwdev->netdev) {
3522 		dev_hold(iwdev->netdev);
3523 		return iwdev->netdev;
3524 	}
3525 
3526 	return NULL;
3527 }
3528 
3529 static void
3530 irdma_set_device_ops(struct ib_device *ibdev)
3531 {
3532 	struct ib_device *dev_ops = ibdev;
3533 
3534 #if __FreeBSD_version >= 1400000
3535 	dev_ops->ops.driver_id = RDMA_DRIVER_I40IW;
3536 	dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah);
3537 	dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq);
3538 	dev_ops->ops.size_ib_pd = IRDMA_SET_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd);
3539 	dev_ops->ops.size_ib_ucontext = IRDMA_SET_RDMA_OBJ_SIZE(ib_ucontext,
3540 								irdma_ucontext,
3541 								ibucontext);
3542 
3543 #endif				/* __FreeBSD_version >= 1400000 */
3544 	dev_ops->alloc_hw_stats = irdma_alloc_hw_stats;
3545 	dev_ops->alloc_mr = irdma_alloc_mr;
3546 	dev_ops->alloc_mw = irdma_alloc_mw;
3547 	dev_ops->alloc_pd = irdma_alloc_pd;
3548 	dev_ops->alloc_ucontext = irdma_alloc_ucontext;
3549 	dev_ops->create_cq = irdma_create_cq;
3550 	dev_ops->create_qp = irdma_create_qp;
3551 	dev_ops->dealloc_mw = irdma_dealloc_mw;
3552 	dev_ops->dealloc_pd = irdma_dealloc_pd;
3553 	dev_ops->dealloc_ucontext = irdma_dealloc_ucontext;
3554 	dev_ops->dereg_mr = irdma_dereg_mr;
3555 	dev_ops->destroy_cq = irdma_destroy_cq;
3556 	dev_ops->destroy_qp = irdma_destroy_qp;
3557 	dev_ops->disassociate_ucontext = irdma_disassociate_ucontext;
3558 	dev_ops->get_dev_fw_str = irdma_get_dev_fw_str;
3559 	dev_ops->get_dma_mr = irdma_get_dma_mr;
3560 	dev_ops->get_hw_stats = irdma_get_hw_stats;
3561 	dev_ops->get_netdev = irdma_get_netdev;
3562 	dev_ops->map_mr_sg = irdma_map_mr_sg;
3563 	dev_ops->mmap = irdma_mmap;
3564 #if __FreeBSD_version >= 1400026
3565 	dev_ops->mmap_free = irdma_mmap_free;
3566 #endif
3567 	dev_ops->poll_cq = irdma_poll_cq;
3568 	dev_ops->post_recv = irdma_post_recv;
3569 	dev_ops->post_send = irdma_post_send;
3570 	dev_ops->query_device = irdma_query_device;
3571 	dev_ops->query_port = irdma_query_port;
3572 	dev_ops->modify_port = irdma_modify_port;
3573 	dev_ops->query_qp = irdma_query_qp;
3574 	dev_ops->reg_user_mr = irdma_reg_user_mr;
3575 	dev_ops->rereg_user_mr = irdma_rereg_user_mr;
3576 	dev_ops->req_notify_cq = irdma_req_notify_cq;
3577 	dev_ops->resize_cq = irdma_resize_cq;
3578 }
3579 
3580 static void
3581 irdma_set_device_mcast_ops(struct ib_device *ibdev)
3582 {
	struct ib_device *dev_ops = ibdev;

	dev_ops->attach_mcast = irdma_attach_mcast;
3585 	dev_ops->detach_mcast = irdma_detach_mcast;
3586 }
3587 
3588 static void
3589 irdma_set_device_roce_ops(struct ib_device *ibdev)
3590 {
	struct ib_device *dev_ops = ibdev;

	dev_ops->create_ah = irdma_create_ah;
3593 	dev_ops->destroy_ah = irdma_destroy_ah;
3594 	dev_ops->get_link_layer = irdma_get_link_layer;
3595 	dev_ops->get_port_immutable = irdma_roce_port_immutable;
3596 	dev_ops->modify_qp = irdma_modify_qp_roce;
3597 	dev_ops->query_ah = irdma_query_ah;
3598 	dev_ops->query_gid = irdma_query_gid_roce;
3599 	dev_ops->query_pkey = irdma_query_pkey;
	dev_ops->add_gid = irdma_add_gid;
	dev_ops->del_gid = irdma_del_gid;
3602 }
3603 
3604 static void
3605 irdma_set_device_iw_ops(struct ib_device *ibdev)
3606 {
3607 	struct ib_device *dev_ops = ibdev;
3608 
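	/*
	 * iWARP does not use address handles, but the AH uverbs commands are
	 * still advertised and routed to stub handlers so user-space requests
	 * are answered gracefully.
	 */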
3609 	ibdev->uverbs_cmd_mask |=
3610 	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
3611 	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
3612 
3613 	dev_ops->create_ah = irdma_create_ah_stub;
3614 	dev_ops->destroy_ah = irdma_destroy_ah_stub;
3615 	dev_ops->get_port_immutable = irdma_iw_port_immutable;
3616 	dev_ops->modify_qp = irdma_modify_qp;
3617 	dev_ops->query_gid = irdma_query_gid;
3618 	dev_ops->query_pkey = irdma_iw_query_pkey;
3619 }
3620 
3621 static inline void
3622 irdma_set_device_gen1_ops(struct ib_device *ibdev)
3623 {
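	/* No GEN_1-specific verbs overrides are required at this time. */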
3624 }
3625 
3626 /**
 * irdma_init_roce_device - initialization of RoCE RDMA device
3628  * @iwdev: irdma device
3629  */
3630 static void
3631 irdma_init_roce_device(struct irdma_device *iwdev)
3632 {
3633 	kc_set_roce_uverbs_cmd_mask(iwdev);
3634 	iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
3635 	iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
3636 	irdma_set_device_roce_ops(&iwdev->ibdev);
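	/* Multicast verbs are only wired up on GEN_2 hardware. */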
3637 	if (iwdev->rf->rdma_ver == IRDMA_GEN_2)
3638 		irdma_set_device_mcast_ops(&iwdev->ibdev);
3639 }
3640 
3641 /**
 * irdma_init_iw_device - initialization of iWARP RDMA device
3643  * @iwdev: irdma device
3644  */
3645 static int
3646 irdma_init_iw_device(struct irdma_device *iwdev)
3647 {
3648 	struct ifnet *netdev = iwdev->netdev;
3649 
3650 	iwdev->ibdev.node_type = RDMA_NODE_RNIC;
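	/* For iWARP the node GUID is taken directly from the netdev MAC. */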
3651 	ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, IF_LLADDR(netdev));
3652 	iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL);
3653 	if (!iwdev->ibdev.iwcm)
3654 		return -ENOMEM;
3655 
3656 	iwdev->ibdev.iwcm->add_ref = irdma_qp_add_ref;
3657 	iwdev->ibdev.iwcm->rem_ref = irdma_qp_rem_ref;
3658 	iwdev->ibdev.iwcm->get_qp = irdma_get_qp;
3659 	iwdev->ibdev.iwcm->connect = irdma_connect;
3660 	iwdev->ibdev.iwcm->accept = irdma_accept;
3661 	iwdev->ibdev.iwcm->reject = irdma_reject;
3662 	iwdev->ibdev.iwcm->create_listen = irdma_create_listen;
3663 	iwdev->ibdev.iwcm->destroy_listen = irdma_destroy_listen;
3664 	memcpy(iwdev->ibdev.iwcm->ifname, if_name(netdev),
3665 	       sizeof(iwdev->ibdev.iwcm->ifname));
3666 	irdma_set_device_iw_ops(&iwdev->ibdev);
3667 
3668 	return 0;
3669 }
3670 
3671 /**
 * irdma_init_rdma_device - initialization of RDMA device
3673  * @iwdev: irdma device
3674  */
3675 static int
3676 irdma_init_rdma_device(struct irdma_device *iwdev)
3677 {
3678 	struct pci_dev *pcidev = iwdev->rf->pcidev;
3679 	int ret;
3680 
3681 	iwdev->ibdev.owner = THIS_MODULE;
3682 	iwdev->ibdev.uverbs_abi_ver = IRDMA_ABI_VER;
3683 	kc_set_rdma_uverbs_cmd_mask(iwdev);
3684 
3685 	if (iwdev->roce_mode) {
3686 		irdma_init_roce_device(iwdev);
3687 	} else {
3688 		ret = irdma_init_iw_device(iwdev);
3689 		if (ret)
3690 			return ret;
3691 	}
3692 
3693 	iwdev->ibdev.phys_port_cnt = 1;
3694 	iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
3695 	iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev;
3696 	set_ibdev_dma_device(iwdev->ibdev, &pcidev->dev);
3697 	irdma_set_device_ops(&iwdev->ibdev);
3698 	if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
3699 		irdma_set_device_gen1_ops(&iwdev->ibdev);
3700 
3701 	return 0;
3702 }
3703 
3704 /**
3705  * irdma_port_ibevent - indicate port event
3706  * @iwdev: irdma device
3707  */
3708 void
3709 irdma_port_ibevent(struct irdma_device *iwdev)
3710 {
3711 	struct ib_event event;
3712 
3713 	event.device = &iwdev->ibdev;
3714 	event.element.port_num = 1;
3715 	event.event =
3716 	    iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3717 	ib_dispatch_event(&event);
3718 }
3719 
3720 /**
 * irdma_ib_unregister_device - unregister RDMA device from IB core
3723  * @iwdev: irdma device
3724  */
3725 void
3726 irdma_ib_unregister_device(struct irdma_device *iwdev)
3727 {
3728 	iwdev->iw_status = 0;
3729 	irdma_port_ibevent(iwdev);
3730 	ib_unregister_device(&iwdev->ibdev);
3731 	dev_put(iwdev->netdev);
3732 	kfree(iwdev->ibdev.iwcm);
3733 	iwdev->ibdev.iwcm = NULL;
3734 }
3735 
3736 /**
3737  * irdma_ib_register_device - register irdma device to IB core
3738  * @iwdev: irdma device
3739  */
3740 int
3741 irdma_ib_register_device(struct irdma_device *iwdev)
3742 {
3743 	int ret;
3744 
3745 	ret = irdma_init_rdma_device(iwdev);
3746 	if (ret)
3747 		return ret;
3748 
3749 	dev_hold(iwdev->netdev);
3750 	sprintf(iwdev->ibdev.name, "irdma-%s", if_name(iwdev->netdev));
3751 	ret = ib_register_device(&iwdev->ibdev, NULL);
3752 	if (ret)
3753 		goto error;
3754 
3755 	iwdev->iw_status = 1;
3756 	irdma_port_ibevent(iwdev);
3757 
3758 	return 0;
3759 
error:
	/* drop the reference taken by dev_hold() above */
	dev_put(iwdev->netdev);
	kfree(iwdev->ibdev.iwcm);
	iwdev->ibdev.iwcm = NULL;
	irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
		    "Register RDMA device failed\n");
3764 
3765 	return ret;
3766 }
3767