xref: /linux/drivers/infiniband/hw/irdma/verbs.c (revision 78885597b9ccf68d4ce554aec98db01ee3c2d3fc)
1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4 
5 /**
6  * irdma_query_device - get device attributes
7  * @ibdev: device pointer from stack
8  * @props: returning device attributes
9  * @udata: user data
10  */
11 static int irdma_query_device(struct ib_device *ibdev,
12 			      struct ib_device_attr *props,
13 			      struct ib_udata *udata)
14 {
15 	struct irdma_device *iwdev = to_iwdev(ibdev);
16 	struct irdma_pci_f *rf = iwdev->rf;
17 	struct pci_dev *pcidev = iwdev->rf->pcidev;
18 	struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
19 
20 	if (udata->inlen || udata->outlen)
21 		return -EINVAL;
22 
23 	memset(props, 0, sizeof(*props));
24 	addrconf_addr_eui48((u8 *)&props->sys_image_guid,
25 			    iwdev->netdev->dev_addr);
26 	props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
27 			irdma_fw_minor_ver(&rf->sc_dev);
28 	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
29 				  IB_DEVICE_MEM_MGT_EXTENSIONS;
30 	props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
31 	props->vendor_id = pcidev->vendor;
32 	props->vendor_part_id = pcidev->device;
33 
34 	props->hw_ver = rf->pcidev->revision;
35 	props->page_size_cap = hw_attrs->page_size_cap;
36 	props->max_mr_size = hw_attrs->max_mr_size;
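	/* report resources still available for allocation, not absolute HW limits */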
37 	props->max_qp = rf->max_qp - rf->used_qps;
38 	props->max_qp_wr = hw_attrs->max_qp_wr;
39 	props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
40 	props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
41 	props->max_cq = rf->max_cq - rf->used_cqs;
42 	props->max_cqe = rf->max_cqe - 1;
43 	props->max_mr = rf->max_mr - rf->used_mrs;
44 	props->max_mw = props->max_mr;
45 	props->max_pd = rf->max_pd - rf->used_pds;
46 	props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
47 	props->max_qp_rd_atom = hw_attrs->max_hw_ird;
48 	props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
49 	if (rdma_protocol_roce(ibdev, 1)) {
50 		props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
51 		props->max_pkeys = IRDMA_PKEY_TBL_SZ;
52 	}
53 
54 	props->max_ah = rf->max_ah;
55 	props->max_mcast_grp = rf->max_mcg;
56 	props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
57 	props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
58 	props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
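/* GEN_2 and later devices report a completion timestamp; only the low 17 bits are significant */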
59 #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
60 	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
61 		props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
62 
63 	return 0;
64 }
65 
66 /**
67  * irdma_query_port - get port attributes
68  * @ibdev: device pointer from stack
69  * @port: port number for query
70  * @props: returning port attributes
71  */
72 static int irdma_query_port(struct ib_device *ibdev, u32 port,
73 			    struct ib_port_attr *props)
74 {
75 	struct irdma_device *iwdev = to_iwdev(ibdev);
76 	struct net_device *netdev = iwdev->netdev;
77 
78 	/* no need to zero out props here; done by the caller */
79 
80 	props->max_mtu = IB_MTU_4096;
81 	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
82 	props->lid = 1;
83 	props->lmc = 0;
84 	props->sm_lid = 0;
85 	props->sm_sl = 0;
86 	if (netif_carrier_ok(netdev) && netif_running(netdev)) {
87 		props->state = IB_PORT_ACTIVE;
88 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
89 	} else {
90 		props->state = IB_PORT_DOWN;
91 		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
92 	}
93 
94 	ib_get_eth_speed(ibdev, port, &props->active_speed,
95 			 &props->active_width);
96 
97 	if (rdma_protocol_roce(ibdev, 1)) {
98 		props->gid_tbl_len = 32;
99 		props->ip_gids = true;
100 		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
101 	} else {
102 		props->gid_tbl_len = 1;
103 	}
104 	props->qkey_viol_cntr = 0;
105 	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
106 	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
107 
108 	return 0;
109 }
110 
111 /**
112  * irdma_disassociate_ucontext - Disassociate user context
113  * @context: ib user context
114  */
115 static void irdma_disassociate_ucontext(struct ib_ucontext *context)
116 {
117 }
118 
119 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
120 			     struct vm_area_struct *vma)
121 {
122 	u64 pfn;
123 
124 	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
125 		return -EINVAL;
126 
127 	vma->vm_private_data = ucontext;
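	/* doorbell page: BAR0 base plus the HW doorbell register offset, expressed as a PFN */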
128 	pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
129 	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
130 
131 	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
132 				 pgprot_noncached(vma->vm_page_prot), NULL);
133 }
134 
135 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
136 {
137 	struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
138 
139 	kfree(entry);
140 }
141 
142 static struct rdma_user_mmap_entry*
143 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
144 			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
145 {
146 	struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
147 	int ret;
148 
149 	if (!entry)
150 		return NULL;
151 
152 	entry->bar_offset = bar_offset;
153 	entry->mmap_flag = mmap_flag;
154 
155 	ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
156 					  &entry->rdma_entry, PAGE_SIZE);
157 	if (ret) {
158 		kfree(entry);
159 		return NULL;
160 	}
161 	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
162 
163 	return &entry->rdma_entry;
164 }
165 
166 /**
167  * irdma_mmap - user memory map
168  * @context: context created during alloc
169  * @vma: kernel info for user memory map
170  */
171 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
172 {
173 	struct rdma_user_mmap_entry *rdma_entry;
174 	struct irdma_user_mmap_entry *entry;
175 	struct irdma_ucontext *ucontext;
176 	u64 pfn;
177 	int ret;
178 
179 	ucontext = to_ucontext(context);
180 
181 	/* Legacy support for libi40iw with hard-coded mmap key */
182 	if (ucontext->legacy_mode)
183 		return irdma_mmap_legacy(ucontext, vma);
184 
185 	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
186 	if (!rdma_entry) {
187 		ibdev_dbg(&ucontext->iwdev->ibdev,
188 			  "VERBS: pgoff[0x%lx] does not have valid entry\n",
189 			  vma->vm_pgoff);
190 		return -EINVAL;
191 	}
192 
193 	entry = to_irdma_mmap_entry(rdma_entry);
194 	ibdev_dbg(&ucontext->iwdev->ibdev,
195 		  "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
196 		  entry->bar_offset, entry->mmap_flag);
197 
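	/* bar_offset stored in the mmap entry is relative to the start of BAR0 */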
198 	pfn = (entry->bar_offset +
199 	      pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
200 
201 	switch (entry->mmap_flag) {
202 	case IRDMA_MMAP_IO_NC:
203 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
204 					pgprot_noncached(vma->vm_page_prot),
205 					rdma_entry);
206 		break;
207 	case IRDMA_MMAP_IO_WC:
208 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
209 					pgprot_writecombine(vma->vm_page_prot),
210 					rdma_entry);
211 		break;
212 	default:
213 		ret = -EINVAL;
214 	}
215 
216 	if (ret)
217 		ibdev_dbg(&ucontext->iwdev->ibdev,
218 			  "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
219 			  entry->bar_offset, entry->mmap_flag, ret);
220 	rdma_user_mmap_entry_put(rdma_entry);
221 
222 	return ret;
223 }
224 
225 /**
226  * irdma_alloc_push_page - allocate a push page for qp
227  * @iwqp: qp pointer
228  */
229 static void irdma_alloc_push_page(struct irdma_qp *iwqp)
230 {
231 	struct irdma_cqp_request *cqp_request;
232 	struct cqp_cmds_info *cqp_info;
233 	struct irdma_device *iwdev = iwqp->iwdev;
234 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
235 	int status;
236 
237 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
238 	if (!cqp_request)
239 		return;
240 
241 	cqp_info = &cqp_request->info;
242 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
243 	cqp_info->post_sq = 1;
244 	cqp_info->in.u.manage_push_page.info.push_idx = 0;
245 	cqp_info->in.u.manage_push_page.info.qs_handle =
246 		qp->vsi->qos[qp->user_pri].qs_handle;
247 	cqp_info->in.u.manage_push_page.info.free_page = 0;
248 	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
249 	cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
250 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
251 
252 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
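	/* on success the CQP returns the allocated push page index in op_ret_val */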
253 	if (!status && cqp_request->compl_info.op_ret_val <
254 	    iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
255 		qp->push_idx = cqp_request->compl_info.op_ret_val;
256 		qp->push_offset = 0;
257 	}
258 
259 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
260 }
261 
262 /**
263  * irdma_alloc_ucontext - Allocate the user context data structure
264  * @uctx: uverbs context pointer
265  * @udata: user data
266  *
267  * This keeps track of all objects associated with a particular
268  * user-mode client.
269  */
270 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
271 				struct ib_udata *udata)
272 {
273 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
274 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
275 	struct ib_device *ibdev = uctx->device;
276 	struct irdma_device *iwdev = to_iwdev(ibdev);
277 	struct irdma_alloc_ucontext_req req = {};
278 	struct irdma_alloc_ucontext_resp uresp = {};
279 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
280 	struct irdma_uk_attrs *uk_attrs;
281 
282 	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
283 	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
284 		return -EINVAL;
285 
286 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
287 		return -EINVAL;
288 
289 	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
290 		goto ver_error;
291 
292 	ucontext->iwdev = iwdev;
293 	ucontext->abi_ver = req.userspace_ver;
294 
295 	uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
296 	/* GEN_1 legacy support with libi40iw */
297 	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
298 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
299 			return -EOPNOTSUPP;
300 
301 		ucontext->legacy_mode = true;
302 		uresp.max_qps = iwdev->rf->max_qp;
303 		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
304 		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
305 		uresp.kernel_ver = req.userspace_ver;
306 		if (ib_copy_to_udata(udata, &uresp,
307 				     min(sizeof(uresp), udata->outlen)))
308 			return -EFAULT;
309 	} else {
310 		u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
311 
312 		ucontext->db_mmap_entry =
313 			irdma_user_mmap_entry_insert(ucontext, bar_off,
314 						     IRDMA_MMAP_IO_NC,
315 						     &uresp.db_mmap_key);
316 		if (!ucontext->db_mmap_entry)
317 			return -ENOMEM;
318 
319 		uresp.kernel_ver = IRDMA_ABI_VER;
320 		uresp.feature_flags = uk_attrs->feature_flags;
321 		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
322 		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
323 		uresp.max_hw_inline = uk_attrs->max_hw_inline;
324 		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
325 		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
326 		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
327 		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
328 		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
329 		uresp.hw_rev = uk_attrs->hw_rev;
330 		if (ib_copy_to_udata(udata, &uresp,
331 				     min(sizeof(uresp), udata->outlen))) {
332 			rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
333 			return -EFAULT;
334 		}
335 	}
336 
337 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
338 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
339 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
340 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
341 
342 	return 0;
343 
344 ver_error:
345 	ibdev_err(&iwdev->ibdev,
346 		  "Invalid userspace driver version detected. Detected version %d, should be %d\n",
347 		  req.userspace_ver, IRDMA_ABI_VER);
348 	return -EINVAL;
349 }
350 
351 /**
352  * irdma_dealloc_ucontext - deallocate the user context data structure
353  * @context: user context created during alloc
354  */
355 static void irdma_dealloc_ucontext(struct ib_ucontext *context)
356 {
357 	struct irdma_ucontext *ucontext = to_ucontext(context);
358 
359 	rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
360 }
361 
362 /**
363  * irdma_alloc_pd - allocate protection domain
364  * @pd: PD pointer
365  * @udata: user data
366  */
367 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
368 {
369 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
370 	struct irdma_pd *iwpd = to_iwpd(pd);
371 	struct irdma_device *iwdev = to_iwdev(pd->device);
372 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
373 	struct irdma_pci_f *rf = iwdev->rf;
374 	struct irdma_alloc_pd_resp uresp = {};
375 	struct irdma_sc_pd *sc_pd;
376 	u32 pd_id = 0;
377 	int err;
378 
379 	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
380 		return -EINVAL;
381 
382 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
383 			       &rf->next_pd);
384 	if (err)
385 		return err;
386 
387 	sc_pd = &iwpd->sc_pd;
388 	if (udata) {
389 		struct irdma_ucontext *ucontext =
390 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
391 						  ibucontext);
392 		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
393 		uresp.pd_id = pd_id;
394 		if (ib_copy_to_udata(udata, &uresp,
395 				     min(sizeof(uresp), udata->outlen))) {
396 			err = -EFAULT;
397 			goto error;
398 		}
399 	} else {
400 		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
401 	}
402 
403 	return 0;
404 error:
405 	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
406 
407 	return err;
408 }
409 
410 /**
411  * irdma_dealloc_pd - deallocate pd
412  * @ibpd: ptr of pd to be deallocated
413  * @udata: user data
414  */
415 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
416 {
417 	struct irdma_pd *iwpd = to_iwpd(ibpd);
418 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
419 
420 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
421 
422 	return 0;
423 }
424 
425 /**
426  * irdma_get_pbl - Retrieve pbl from a list given a virtual
427  * address
428  * @va: user virtual address
429  * @pbl_list: pbl list to search in (QP's or CQ's)
430  */
431 static struct irdma_pbl *irdma_get_pbl(unsigned long va,
432 				       struct list_head *pbl_list)
433 {
434 	struct irdma_pbl *iwpbl;
435 
436 	list_for_each_entry (iwpbl, pbl_list, list) {
437 		if (iwpbl->user_base == va) {
438 			list_del(&iwpbl->list);
439 			iwpbl->on_list = false;
440 			return iwpbl;
441 		}
442 	}
443 
444 	return NULL;
445 }
446 
447 /**
448  * irdma_clean_cqes - clean cq entries for qp
449  * @iwqp: qp ptr (user or kernel)
450  * @iwcq: cq ptr
451  */
452 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
453 {
454 	struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
455 	unsigned long flags;
456 
457 	spin_lock_irqsave(&iwcq->lock, flags);
458 	irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
459 	spin_unlock_irqrestore(&iwcq->lock, flags);
460 }
461 
462 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
463 {
464 	if (iwqp->push_db_mmap_entry) {
465 		rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
466 		iwqp->push_db_mmap_entry = NULL;
467 	}
468 	if (iwqp->push_wqe_mmap_entry) {
469 		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
470 		iwqp->push_wqe_mmap_entry = NULL;
471 	}
472 }
473 
474 static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
475 					 struct irdma_qp *iwqp,
476 					 u64 *push_wqe_mmap_key,
477 					 u64 *push_db_mmap_key)
478 {
479 	struct irdma_device *iwdev = ucontext->iwdev;
480 	u64 rsvd, bar_off;
481 
482 	rsvd = IRDMA_PF_BAR_RSVD;
483 	bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
484 	/* skip over db page */
485 	bar_off += IRDMA_HW_PAGE_SIZE;
486 	/* push wqe page */
487 	bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
488 	iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
489 					bar_off, IRDMA_MMAP_IO_WC,
490 					push_wqe_mmap_key);
491 	if (!iwqp->push_wqe_mmap_entry)
492 		return -ENOMEM;
493 
494 	/* push doorbell page */
495 	bar_off += IRDMA_HW_PAGE_SIZE;
496 	iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
497 					bar_off, IRDMA_MMAP_IO_NC,
498 					push_db_mmap_key);
499 	if (!iwqp->push_db_mmap_entry) {
500 		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
501 		return -ENOMEM;
502 	}
503 
504 	return 0;
505 }
506 
507 /**
508  * irdma_destroy_qp - destroy qp
509  * @ibqp: qp's ib pointer, also used to get the device's qp address
510  * @udata: user data
511  */
512 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
513 {
514 	struct irdma_qp *iwqp = to_iwqp(ibqp);
515 	struct irdma_device *iwdev = iwqp->iwdev;
516 
517 	iwqp->sc_qp.qp_uk.destroy_pending = true;
518 
519 	if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
520 		irdma_modify_qp_to_err(&iwqp->sc_qp);
521 
522 	if (!iwqp->user_mode)
523 		cancel_delayed_work_sync(&iwqp->dwork_flush);
524 
525 	irdma_qp_rem_ref(&iwqp->ibqp);
526 	wait_for_completion(&iwqp->free_qp);
527 	irdma_free_lsmm_rsrc(iwqp);
528 	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
529 
530 	if (!iwqp->user_mode) {
531 		if (iwqp->iwscq) {
532 			irdma_clean_cqes(iwqp, iwqp->iwscq);
533 			if (iwqp->iwrcq != iwqp->iwscq)
534 				irdma_clean_cqes(iwqp, iwqp->iwrcq);
535 		}
536 	}
537 	irdma_remove_push_mmap_entries(iwqp);
538 	irdma_free_qp_rsrc(iwqp);
539 
540 	return 0;
541 }
542 
543 /**
544  * irdma_setup_virt_qp - setup for allocation of virtual qp
545  * @iwdev: irdma device
546  * @iwqp: qp ptr
547  * @init_info: initialize info to return
548  */
549 static void irdma_setup_virt_qp(struct irdma_device *iwdev,
550 			       struct irdma_qp *iwqp,
551 			       struct irdma_qp_init_info *init_info)
552 {
553 	struct irdma_pbl *iwpbl = iwqp->iwpbl;
554 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
555 
556 	iwqp->page = qpmr->sq_page;
557 	init_info->shadow_area_pa = qpmr->shadow;
558 	if (iwpbl->pbl_allocated) {
559 		init_info->virtual_map = true;
560 		init_info->sq_pa = qpmr->sq_pbl.idx;
561 		init_info->rq_pa = qpmr->rq_pbl.idx;
562 	} else {
563 		init_info->sq_pa = qpmr->sq_pbl.addr;
564 		init_info->rq_pa = qpmr->rq_pbl.addr;
565 	}
566 }
567 
568 /**
569  * irdma_setup_kmode_qp - setup initialization for kernel mode qp
570  * @iwdev: iwarp device
571  * @iwqp: qp ptr (kernel)
572  * @info: initialize info to return
573  * @init_attr: Initial QP create attributes
574  */
575 static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
576 				struct irdma_qp *iwqp,
577 				struct irdma_qp_init_info *info,
578 				struct ib_qp_init_attr *init_attr)
579 {
580 	struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
581 	u32 sqdepth, rqdepth;
582 	u8 sqshift, rqshift;
583 	u32 size;
584 	int status;
585 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
586 	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
587 
588 	irdma_get_wqe_shift(uk_attrs,
589 		uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
590 						  ukinfo->max_sq_frag_cnt,
591 		ukinfo->max_inline_data, &sqshift);
592 	status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
593 				   &sqdepth);
594 	if (status)
595 		return status;
596 
597 	if (uk_attrs->hw_rev == IRDMA_GEN_1)
598 		rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
599 	else
600 		irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
601 				    &rqshift);
602 
603 	status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
604 				   &rqdepth);
605 	if (status)
606 		return status;
607 
608 	iwqp->kqp.sq_wrid_mem =
609 		kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
610 	if (!iwqp->kqp.sq_wrid_mem)
611 		return -ENOMEM;
612 
613 	iwqp->kqp.rq_wrid_mem =
614 		kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
615 	if (!iwqp->kqp.rq_wrid_mem) {
616 		kfree(iwqp->kqp.sq_wrid_mem);
617 		iwqp->kqp.sq_wrid_mem = NULL;
618 		return -ENOMEM;
619 	}
620 
621 	ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
622 	ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
623 
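	/* single DMA buffer laid out as SQ ring, then RQ ring, then shadow area */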
624 	size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
625 	size += (IRDMA_SHADOW_AREA_SIZE << 3);
626 
627 	mem->size = ALIGN(size, 256);
628 	mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
629 				     &mem->pa, GFP_KERNEL);
630 	if (!mem->va) {
631 		kfree(iwqp->kqp.sq_wrid_mem);
632 		iwqp->kqp.sq_wrid_mem = NULL;
633 		kfree(iwqp->kqp.rq_wrid_mem);
634 		iwqp->kqp.rq_wrid_mem = NULL;
635 		return -ENOMEM;
636 	}
637 
638 	ukinfo->sq = mem->va;
639 	info->sq_pa = mem->pa;
640 	ukinfo->rq = &ukinfo->sq[sqdepth];
641 	info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
642 	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
643 	info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
644 	ukinfo->sq_size = sqdepth >> sqshift;
645 	ukinfo->rq_size = rqdepth >> rqshift;
646 	ukinfo->qp_id = iwqp->ibqp.qp_num;
647 
648 	init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
649 	init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
650 
651 	return 0;
652 }
653 
654 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
655 {
656 	struct irdma_pci_f *rf = iwqp->iwdev->rf;
657 	struct irdma_cqp_request *cqp_request;
658 	struct cqp_cmds_info *cqp_info;
659 	struct irdma_create_qp_info *qp_info;
660 	int status;
661 
662 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
663 	if (!cqp_request)
664 		return -ENOMEM;
665 
666 	cqp_info = &cqp_request->info;
667 	qp_info = &cqp_request->info.in.u.qp_create.info;
668 	memset(qp_info, 0, sizeof(*qp_info));
669 	qp_info->mac_valid = true;
670 	qp_info->cq_num_valid = true;
671 	qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
672 
673 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
674 	cqp_info->post_sq = 1;
675 	cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
676 	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
677 	status = irdma_handle_cqp_op(rf, cqp_request);
678 	irdma_put_cqp_request(&rf->cqp, cqp_request);
679 
680 	return status;
681 }
682 
683 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
684 					       struct irdma_qp_host_ctx_info *ctx_info)
685 {
686 	struct irdma_device *iwdev = iwqp->iwdev;
687 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
688 	struct irdma_roce_offload_info *roce_info;
689 	struct irdma_udp_offload_info *udp_info;
690 
691 	udp_info = &iwqp->udp_info;
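	/* clamp the netdev MTU down to the nearest standard IB MTU for the send MSS */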
692 	udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
693 	udp_info->cwnd = iwdev->roce_cwnd;
694 	udp_info->rexmit_thresh = 2;
695 	udp_info->rnr_nak_thresh = 2;
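	/* default RoCEv2 source port: 0xc000, the start of the ephemeral port range */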
696 	udp_info->src_port = 0xc000;
697 	udp_info->dst_port = ROCE_V2_UDP_DPORT;
698 	roce_info = &iwqp->roce_info;
699 	ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
700 
701 	roce_info->rd_en = true;
702 	roce_info->wr_rdresp_en = true;
703 	roce_info->bind_en = true;
704 	roce_info->dcqcn_en = false;
705 	roce_info->rtomin = 5;
706 
707 	roce_info->ack_credits = iwdev->roce_ackcreds;
708 	roce_info->ird_size = dev->hw_attrs.max_hw_ird;
709 	roce_info->ord_size = dev->hw_attrs.max_hw_ord;
710 
711 	if (!iwqp->user_mode) {
712 		roce_info->priv_mode_en = true;
713 		roce_info->fast_reg_en = true;
714 		roce_info->udprivcq_en = true;
715 	}
716 	roce_info->roce_tver = 0;
717 
718 	ctx_info->roce_info = &iwqp->roce_info;
719 	ctx_info->udp_info = &iwqp->udp_info;
720 	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
721 }
722 
723 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
724 					     struct irdma_qp_host_ctx_info *ctx_info)
725 {
726 	struct irdma_device *iwdev = iwqp->iwdev;
727 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
728 	struct irdma_iwarp_offload_info *iwarp_info;
729 
730 	iwarp_info = &iwqp->iwarp_info;
731 	ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
732 	iwarp_info->rd_en = true;
733 	iwarp_info->wr_rdresp_en = true;
734 	iwarp_info->bind_en = true;
735 	iwarp_info->ecn_en = true;
736 	iwarp_info->rtomin = 5;
737 
738 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
739 		iwarp_info->ib_rd_en = true;
740 	if (!iwqp->user_mode) {
741 		iwarp_info->priv_mode_en = true;
742 		iwarp_info->fast_reg_en = true;
743 	}
744 	iwarp_info->ddp_ver = 1;
745 	iwarp_info->rdmap_ver = 1;
746 
747 	ctx_info->iwarp_info = &iwqp->iwarp_info;
748 	ctx_info->iwarp_info_valid = true;
749 	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
750 	ctx_info->iwarp_info_valid = false;
751 }
752 
753 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
754 				   struct irdma_device *iwdev)
755 {
756 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
757 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
758 
759 	if (init_attr->create_flags)
760 		return -EOPNOTSUPP;
761 
762 	if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
763 	    init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
764 	    init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
765 		return -EINVAL;
766 
767 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
768 		if (init_attr->qp_type != IB_QPT_RC &&
769 		    init_attr->qp_type != IB_QPT_UD &&
770 		    init_attr->qp_type != IB_QPT_GSI)
771 			return -EOPNOTSUPP;
772 	} else {
773 		if (init_attr->qp_type != IB_QPT_RC)
774 			return -EOPNOTSUPP;
775 	}
776 
777 	return 0;
778 }
779 
780 static void irdma_flush_worker(struct work_struct *work)
781 {
782 	struct delayed_work *dwork = to_delayed_work(work);
783 	struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
784 
785 	irdma_generate_flush_completions(iwqp);
786 }
787 
788 /**
789  * irdma_create_qp - create qp
790  * @ibqp: ptr of qp
791  * @init_attr: attributes for qp
792  * @udata: user data for create qp
793  */
794 static int irdma_create_qp(struct ib_qp *ibqp,
795 			   struct ib_qp_init_attr *init_attr,
796 			   struct ib_udata *udata)
797 {
798 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
799 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
800 	struct ib_pd *ibpd = ibqp->pd;
801 	struct irdma_pd *iwpd = to_iwpd(ibpd);
802 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
803 	struct irdma_pci_f *rf = iwdev->rf;
804 	struct irdma_qp *iwqp = to_iwqp(ibqp);
805 	struct irdma_create_qp_req req = {};
806 	struct irdma_create_qp_resp uresp = {};
807 	u32 qp_num = 0;
808 	int err_code;
809 	int sq_size;
810 	int rq_size;
811 	struct irdma_sc_qp *qp;
812 	struct irdma_sc_dev *dev = &rf->sc_dev;
813 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
814 	struct irdma_qp_init_info init_info = {};
815 	struct irdma_qp_host_ctx_info *ctx_info;
816 	unsigned long flags;
817 
818 	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
819 	if (err_code)
820 		return err_code;
821 
822 	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
823 		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
824 		return -EINVAL;
825 
826 	sq_size = init_attr->cap.max_send_wr;
827 	rq_size = init_attr->cap.max_recv_wr;
828 
829 	init_info.vsi = &iwdev->vsi;
830 	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
831 	init_info.qp_uk_init_info.sq_size = sq_size;
832 	init_info.qp_uk_init_info.rq_size = rq_size;
833 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
834 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
835 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
836 
837 	qp = &iwqp->sc_qp;
838 	qp->qp_uk.back_qp = iwqp;
839 	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
840 
841 	iwqp->iwdev = iwdev;
842 	iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
843 				      256);
844 	iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
845 						 iwqp->q2_ctx_mem.size,
846 						 &iwqp->q2_ctx_mem.pa,
847 						 GFP_KERNEL);
848 	if (!iwqp->q2_ctx_mem.va)
849 		return -ENOMEM;
850 
851 	init_info.q2 = iwqp->q2_ctx_mem.va;
852 	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
853 	init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
854 	init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
855 
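	/* QP 1 is reserved for the GSI QP */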
856 	if (init_attr->qp_type == IB_QPT_GSI)
857 		qp_num = 1;
858 	else
859 		err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
860 					    &qp_num, &rf->next_qp);
861 	if (err_code)
862 		goto error;
863 
864 	iwqp->iwpd = iwpd;
865 	iwqp->ibqp.qp_num = qp_num;
866 	qp = &iwqp->sc_qp;
867 	iwqp->iwscq = to_iwcq(init_attr->send_cq);
868 	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
869 	iwqp->host_ctx.va = init_info.host_ctx;
870 	iwqp->host_ctx.pa = init_info.host_ctx_pa;
871 	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
872 
873 	init_info.pd = &iwpd->sc_pd;
874 	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
875 	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
876 		init_info.qp_uk_init_info.first_sq_wq = 1;
877 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
878 	init_waitqueue_head(&iwqp->waitq);
879 	init_waitqueue_head(&iwqp->mod_qp_waitq);
880 
881 	if (udata) {
882 		err_code = ib_copy_from_udata(&req, udata,
883 					      min(sizeof(req), udata->inlen));
884 		if (err_code) {
885 			ibdev_dbg(&iwdev->ibdev,
886 				  "VERBS: ib_copy_from_udata fail\n");
887 			goto error;
888 		}
889 
890 		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
891 		iwqp->user_mode = 1;
892 		if (req.user_wqe_bufs) {
893 			struct irdma_ucontext *ucontext =
894 				rdma_udata_to_drv_context(udata,
895 							  struct irdma_ucontext,
896 							  ibucontext);
897 
898 			init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
899 			spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
900 			iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
901 						    &ucontext->qp_reg_mem_list);
902 			spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
903 
904 			if (!iwqp->iwpbl) {
905 				err_code = -ENODATA;
906 				ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
907 				goto error;
908 			}
909 		}
910 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
911 		irdma_setup_virt_qp(iwdev, iwqp, &init_info);
912 	} else {
913 		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
914 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
915 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
916 	}
917 
918 	if (err_code) {
919 		ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
920 		goto error;
921 	}
922 
923 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
924 		if (init_attr->qp_type == IB_QPT_RC) {
925 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
926 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
927 							    IRDMA_WRITE_WITH_IMM |
928 							    IRDMA_ROCE;
929 		} else {
930 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
931 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
932 							    IRDMA_ROCE;
933 		}
934 	} else {
935 		init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
936 		init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
937 	}
938 
939 	if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
940 		init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
941 
942 	err_code = irdma_sc_qp_init(qp, &init_info);
943 	if (err_code) {
944 		ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
945 		goto error;
946 	}
947 
948 	ctx_info = &iwqp->ctx_info;
949 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
950 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
951 
952 	if (rdma_protocol_roce(&iwdev->ibdev, 1))
953 		irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
954 	else
955 		irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
956 
957 	err_code = irdma_cqp_create_qp_cmd(iwqp);
958 	if (err_code)
959 		goto error;
960 
961 	refcount_set(&iwqp->refcnt, 1);
962 	spin_lock_init(&iwqp->lock);
963 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
964 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
965 	rf->qp_table[qp_num] = iwqp;
966 	iwqp->max_send_wr = sq_size;
967 	iwqp->max_recv_wr = rq_size;
968 
969 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
970 		if (dev->ws_add(&iwdev->vsi, 0)) {
971 			irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
972 			err_code = -EINVAL;
973 			goto error;
974 		}
975 
976 		irdma_qp_add_qos(&iwqp->sc_qp);
977 	}
978 
979 	if (udata) {
980 		/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
981 		if (udata->outlen < sizeof(uresp)) {
982 			uresp.lsmm = 1;
983 			uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
984 		} else {
985 			if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
986 				uresp.lsmm = 1;
987 		}
988 		uresp.actual_sq_size = sq_size;
989 		uresp.actual_rq_size = rq_size;
990 		uresp.qp_id = qp_num;
991 		uresp.qp_caps = qp->qp_uk.qp_caps;
992 
993 		err_code = ib_copy_to_udata(udata, &uresp,
994 					    min(sizeof(uresp), udata->outlen));
995 		if (err_code) {
996 			ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
997 			irdma_destroy_qp(&iwqp->ibqp, udata);
998 			return err_code;
999 		}
1000 	}
1001 
1002 	init_completion(&iwqp->free_qp);
1003 	return 0;
1004 
1005 error:
1006 	irdma_free_qp_rsrc(iwqp);
1007 	return err_code;
1008 }
1009 
1010 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1011 {
1012 	int acc_flags = 0;
1013 
1014 	if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1015 		if (iwqp->roce_info.wr_rdresp_en) {
1016 			acc_flags |= IB_ACCESS_LOCAL_WRITE;
1017 			acc_flags |= IB_ACCESS_REMOTE_WRITE;
1018 		}
1019 		if (iwqp->roce_info.rd_en)
1020 			acc_flags |= IB_ACCESS_REMOTE_READ;
1021 		if (iwqp->roce_info.bind_en)
1022 			acc_flags |= IB_ACCESS_MW_BIND;
1023 	} else {
1024 		if (iwqp->iwarp_info.wr_rdresp_en) {
1025 			acc_flags |= IB_ACCESS_LOCAL_WRITE;
1026 			acc_flags |= IB_ACCESS_REMOTE_WRITE;
1027 		}
1028 		if (iwqp->iwarp_info.rd_en)
1029 			acc_flags |= IB_ACCESS_REMOTE_READ;
1030 		if (iwqp->iwarp_info.bind_en)
1031 			acc_flags |= IB_ACCESS_MW_BIND;
1032 	}
1033 	return acc_flags;
1034 }
1035 
1036 /**
1037  * irdma_query_qp - query qp attributes
1038  * @ibqp: qp pointer
1039  * @attr: attributes pointer
1040  * @attr_mask: Not used
1041  * @init_attr: qp attributes to return
1042  */
1043 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1044 			  int attr_mask, struct ib_qp_init_attr *init_attr)
1045 {
1046 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1047 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
1048 
1049 	memset(attr, 0, sizeof(*attr));
1050 	memset(init_attr, 0, sizeof(*init_attr));
1051 
1052 	attr->qp_state = iwqp->ibqp_state;
1053 	attr->cur_qp_state = iwqp->ibqp_state;
1054 	attr->cap.max_send_wr = iwqp->max_send_wr;
1055 	attr->cap.max_recv_wr = iwqp->max_recv_wr;
1056 	attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
1057 	attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
1058 	attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
1059 	attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1060 	attr->port_num = 1;
1061 	if (rdma_protocol_roce(ibqp->device, 1)) {
1062 		attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1063 		attr->qkey = iwqp->roce_info.qkey;
1064 		attr->rq_psn = iwqp->udp_info.epsn;
1065 		attr->sq_psn = iwqp->udp_info.psn_nxt;
1066 		attr->dest_qp_num = iwqp->roce_info.dest_qp;
1067 		attr->pkey_index = iwqp->roce_info.p_key;
1068 		attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1069 		attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1070 		attr->max_rd_atomic = iwqp->roce_info.ord_size;
1071 		attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1072 	}
1073 
1074 	init_attr->event_handler = iwqp->ibqp.event_handler;
1075 	init_attr->qp_context = iwqp->ibqp.qp_context;
1076 	init_attr->send_cq = iwqp->ibqp.send_cq;
1077 	init_attr->recv_cq = iwqp->ibqp.recv_cq;
1078 	init_attr->cap = attr->cap;
1079 
1080 	return 0;
1081 }
1082 
1083 /**
1084  * irdma_query_pkey - Query partition key
1085  * @ibdev: device pointer from stack
1086  * @port: port number
1087  * @index: index of pkey
1088  * @pkey: pointer to store the pkey
1089  */
1090 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1091 			    u16 *pkey)
1092 {
1093 	if (index >= IRDMA_PKEY_TBL_SZ)
1094 		return -EINVAL;
1095 
1096 	*pkey = IRDMA_DEFAULT_PKEY;
1097 	return 0;
1098 }
1099 
1100 /**
1101  * irdma_modify_qp_roce - modify qp request
1102  * @ibqp: qp's pointer for modify
1103  * @attr: access attributes
1104  * @attr_mask: state mask
1105  * @udata: user data
1106  */
1107 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1108 			 int attr_mask, struct ib_udata *udata)
1109 {
1110 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1111 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1112 	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
1113 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1114 	struct irdma_device *iwdev = iwqp->iwdev;
1115 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1116 	struct irdma_qp_host_ctx_info *ctx_info;
1117 	struct irdma_roce_offload_info *roce_info;
1118 	struct irdma_udp_offload_info *udp_info;
1119 	struct irdma_modify_qp_info info = {};
1120 	struct irdma_modify_qp_resp uresp = {};
1121 	struct irdma_modify_qp_req ureq = {};
1122 	unsigned long flags;
1123 	u8 issue_modify_qp = 0;
1124 	int ret = 0;
1125 
1126 	ctx_info = &iwqp->ctx_info;
1127 	roce_info = &iwqp->roce_info;
1128 	udp_info = &iwqp->udp_info;
1129 
1130 	if (udata) {
1131 		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1132 		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1133 		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1134 			return -EINVAL;
1135 	}
1136 
1137 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1138 		return -EOPNOTSUPP;
1139 
1140 	if (attr_mask & IB_QP_DEST_QPN)
1141 		roce_info->dest_qp = attr->dest_qp_num;
1142 
1143 	if (attr_mask & IB_QP_PKEY_INDEX) {
1144 		ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
1145 				       &roce_info->p_key);
1146 		if (ret)
1147 			return ret;
1148 	}
1149 
1150 	if (attr_mask & IB_QP_QKEY)
1151 		roce_info->qkey = attr->qkey;
1152 
1153 	if (attr_mask & IB_QP_PATH_MTU)
1154 		udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
1155 
1156 	if (attr_mask & IB_QP_SQ_PSN) {
1157 		udp_info->psn_nxt = attr->sq_psn;
1158 		udp_info->lsn = 0xffff;
1159 		udp_info->psn_una = attr->sq_psn;
1160 		udp_info->psn_max = attr->sq_psn;
1161 	}
1162 
1163 	if (attr_mask & IB_QP_RQ_PSN)
1164 		udp_info->epsn = attr->rq_psn;
1165 
1166 	if (attr_mask & IB_QP_RNR_RETRY)
1167 		udp_info->rnr_nak_thresh = attr->rnr_retry;
1168 
1169 	if (attr_mask & IB_QP_RETRY_CNT)
1170 		udp_info->rexmit_thresh = attr->retry_cnt;
1171 
1172 	ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
1173 
1174 	if (attr_mask & IB_QP_AV) {
1175 		struct irdma_av *av = &iwqp->roce_ah.av;
1176 		const struct ib_gid_attr *sgid_attr;
1177 		u16 vlan_id = VLAN_N_VID;
1178 		u32 local_ip[4];
1179 
1180 		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1181 		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1182 			udp_info->ttl = attr->ah_attr.grh.hop_limit;
1183 			udp_info->flow_label = attr->ah_attr.grh.flow_label;
1184 			udp_info->tos = attr->ah_attr.grh.traffic_class;
1185 			udp_info->src_port =
1186 				rdma_get_udp_sport(udp_info->flow_label,
1187 						   ibqp->qp_num,
1188 						   roce_info->dest_qp);
1189 			irdma_qp_rem_qos(&iwqp->sc_qp);
1190 			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1191 			ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1192 			iwqp->sc_qp.user_pri = ctx_info->user_pri;
1193 			if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1194 				return -ENOMEM;
1195 			irdma_qp_add_qos(&iwqp->sc_qp);
1196 		}
1197 		sgid_attr = attr->ah_attr.grh.sgid_attr;
1198 		ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
1199 					      ctx_info->roce_info->mac_addr);
1200 		if (ret)
1201 			return ret;
1202 
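		/* with DCB, fall back to priority-tagged frames (VLAN 0) when no VLAN is configured */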
1203 		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1204 			vlan_id = 0;
1205 		if (vlan_id < VLAN_N_VID) {
1206 			udp_info->insert_vlan_tag = true;
1207 			udp_info->vlan_tag = vlan_id |
1208 				ctx_info->user_pri << VLAN_PRIO_SHIFT;
1209 		} else {
1210 			udp_info->insert_vlan_tag = false;
1211 		}
1212 
1213 		av->attrs = attr->ah_attr;
1214 		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
1215 		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1216 		av->net_type = rdma_gid_attr_network_type(sgid_attr);
1217 		if (av->net_type == RDMA_NETWORK_IPV6) {
1218 			__be32 *daddr =
1219 				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1220 			__be32 *saddr =
1221 				av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1222 
1223 			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1224 			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1225 
1226 			udp_info->ipv4 = false;
1227 			irdma_copy_ip_ntohl(local_ip, daddr);
1228 
1229 			udp_info->arp_idx = irdma_arp_table(iwdev->rf,
1230 							    &local_ip[0],
1231 							    false, NULL,
1232 							    IRDMA_ARP_RESOLVE);
1233 		} else if (av->net_type == RDMA_NETWORK_IPV4) {
1234 			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1235 			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1236 
1237 			local_ip[0] = ntohl(daddr);
1238 
1239 			udp_info->ipv4 = true;
1240 			udp_info->dest_ip_addr[0] = 0;
1241 			udp_info->dest_ip_addr[1] = 0;
1242 			udp_info->dest_ip_addr[2] = 0;
1243 			udp_info->dest_ip_addr[3] = local_ip[0];
1244 
1245 			udp_info->local_ipaddr[0] = 0;
1246 			udp_info->local_ipaddr[1] = 0;
1247 			udp_info->local_ipaddr[2] = 0;
1248 			udp_info->local_ipaddr[3] = ntohl(saddr);
1249 		}
1250 		udp_info->arp_idx =
1251 			irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1252 				      attr->ah_attr.roce.dmac);
1253 	}
1254 
1255 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1256 		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1257 			ibdev_err(&iwdev->ibdev,
1258 				  "rd_atomic = %d, above max_hw_ord=%d\n",
1259 				  attr->max_rd_atomic,
1260 				  dev->hw_attrs.max_hw_ord);
1261 			return -EINVAL;
1262 		}
1263 		if (attr->max_rd_atomic)
1264 			roce_info->ord_size = attr->max_rd_atomic;
1265 		info.ord_valid = true;
1266 	}
1267 
1268 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1269 		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1270 			ibdev_err(&iwdev->ibdev,
1271 				  "dest_rd_atomic = %d, above max_hw_ird=%d\n",
1272 				  attr->max_dest_rd_atomic,
1273 				  dev->hw_attrs.max_hw_ird);
1274 			return -EINVAL;
1275 		}
1276 		if (attr->max_dest_rd_atomic)
1277 			roce_info->ird_size = attr->max_dest_rd_atomic;
1278 	}
1279 
1280 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1281 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1282 			roce_info->wr_rdresp_en = true;
1283 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1284 			roce_info->wr_rdresp_en = true;
1285 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1286 			roce_info->rd_en = true;
1287 	}
1288 
1289 	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1290 
1291 	ibdev_dbg(&iwdev->ibdev,
1292 		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1293 		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1294 		  iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1295 
1296 	spin_lock_irqsave(&iwqp->lock, flags);
1297 	if (attr_mask & IB_QP_STATE) {
1298 		if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1299 					iwqp->ibqp.qp_type, attr_mask)) {
1300 			ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1301 				   iwqp->ibqp.qp_num, iwqp->ibqp_state,
1302 				   attr->qp_state);
1303 			ret = -EINVAL;
1304 			goto exit;
1305 		}
1306 		info.curr_iwarp_state = iwqp->iwarp_state;
1307 
1308 		switch (attr->qp_state) {
1309 		case IB_QPS_INIT:
1310 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1311 				ret = -EINVAL;
1312 				goto exit;
1313 			}
1314 
1315 			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1316 				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1317 				issue_modify_qp = 1;
1318 			}
1319 			break;
1320 		case IB_QPS_RTR:
1321 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1322 				ret = -EINVAL;
1323 				goto exit;
1324 			}
1325 			info.arp_cache_idx_valid = true;
1326 			info.cq_num_valid = true;
1327 			info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1328 			issue_modify_qp = 1;
1329 			break;
1330 		case IB_QPS_RTS:
1331 			if (iwqp->ibqp_state < IB_QPS_RTR ||
1332 			    iwqp->ibqp_state == IB_QPS_ERR) {
1333 				ret = -EINVAL;
1334 				goto exit;
1335 			}
1336 
1337 			info.arp_cache_idx_valid = true;
1338 			info.cq_num_valid = true;
1339 			info.ord_valid = true;
1340 			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1341 			issue_modify_qp = 1;
1342 			if (iwdev->push_mode && udata &&
1343 			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1344 			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1345 				spin_unlock_irqrestore(&iwqp->lock, flags);
1346 				irdma_alloc_push_page(iwqp);
1347 				spin_lock_irqsave(&iwqp->lock, flags);
1348 			}
1349 			break;
1350 		case IB_QPS_SQD:
1351 			if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1352 				goto exit;
1353 
1354 			if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1355 				ret = -EINVAL;
1356 				goto exit;
1357 			}
1358 
1359 			info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1360 			issue_modify_qp = 1;
1361 			break;
1362 		case IB_QPS_SQE:
1363 		case IB_QPS_ERR:
1364 		case IB_QPS_RESET:
1365 			if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1366 				spin_unlock_irqrestore(&iwqp->lock, flags);
1367 				info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1368 				irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1369 				spin_lock_irqsave(&iwqp->lock, flags);
1370 			}
1371 
1372 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1373 				spin_unlock_irqrestore(&iwqp->lock, flags);
1374 				if (udata && udata->inlen) {
1375 					if (ib_copy_from_udata(&ureq, udata,
1376 					    min(sizeof(ureq), udata->inlen)))
1377 						return -EINVAL;
1378 
1379 					irdma_flush_wqes(iwqp,
1380 					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1381 					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1382 					    IRDMA_REFLUSH);
1383 				}
1384 				return 0;
1385 			}
1386 
1387 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1388 			issue_modify_qp = 1;
1389 			break;
1390 		default:
1391 			ret = -EINVAL;
1392 			goto exit;
1393 		}
1394 
1395 		iwqp->ibqp_state = attr->qp_state;
1396 	}
1397 
1398 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1399 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1400 	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1401 	spin_unlock_irqrestore(&iwqp->lock, flags);
1402 
1403 	if (attr_mask & IB_QP_STATE) {
1404 		if (issue_modify_qp) {
1405 			ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1406 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1407 				return -EINVAL;
1408 			spin_lock_irqsave(&iwqp->lock, flags);
1409 			if (iwqp->iwarp_state == info.curr_iwarp_state) {
1410 				iwqp->iwarp_state = info.next_iwarp_state;
1411 				iwqp->ibqp_state = attr->qp_state;
1412 			}
1413 			if (iwqp->ibqp_state > IB_QPS_RTS &&
1414 			    !iwqp->flush_issued) {
1415 				spin_unlock_irqrestore(&iwqp->lock, flags);
1416 				irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1417 						       IRDMA_FLUSH_RQ |
1418 						       IRDMA_FLUSH_WAIT);
1419 				iwqp->flush_issued = 1;
1420 			} else {
1421 				spin_unlock_irqrestore(&iwqp->lock, flags);
1422 			}
1423 		} else {
1424 			iwqp->ibqp_state = attr->qp_state;
1425 		}
1426 		if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1427 			struct irdma_ucontext *ucontext;
1428 
1429 			ucontext = rdma_udata_to_drv_context(udata,
1430 					struct irdma_ucontext, ibucontext);
1431 			if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1432 			    !iwqp->push_wqe_mmap_entry &&
1433 			    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1434 				&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1435 				uresp.push_valid = 1;
1436 				uresp.push_offset = iwqp->sc_qp.push_offset;
1437 			}
1438 			ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1439 					       udata->outlen));
1440 			if (ret) {
1441 				irdma_remove_push_mmap_entries(iwqp);
1442 				ibdev_dbg(&iwdev->ibdev,
1443 					  "VERBS: copy_to_udata failed\n");
1444 				return ret;
1445 			}
1446 		}
1447 	}
1448 
1449 	return 0;
1450 exit:
1451 	spin_unlock_irqrestore(&iwqp->lock, flags);
1452 
1453 	return ret;
1454 }
1455 
1456 /**
1457  * irdma_modify_qp - modify qp request
1458  * @ibqp: qp's pointer for modify
1459  * @attr: access attributes
1460  * @attr_mask: state mask
1461  * @udata: user data
1462  */
1463 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1464 		    struct ib_udata *udata)
1465 {
1466 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1467 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1468 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1469 	struct irdma_device *iwdev = iwqp->iwdev;
1470 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1471 	struct irdma_qp_host_ctx_info *ctx_info;
1472 	struct irdma_tcp_offload_info *tcp_info;
1473 	struct irdma_iwarp_offload_info *offload_info;
1474 	struct irdma_modify_qp_info info = {};
1475 	struct irdma_modify_qp_resp uresp = {};
1476 	struct irdma_modify_qp_req ureq = {};
1477 	u8 issue_modify_qp = 0;
1478 	u8 dont_wait = 0;
1479 	int err;
1480 	unsigned long flags;
1481 
1482 	if (udata) {
1483 		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1484 		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1485 		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1486 			return -EINVAL;
1487 	}
1488 
1489 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1490 		return -EOPNOTSUPP;
1491 
1492 	ctx_info = &iwqp->ctx_info;
1493 	offload_info = &iwqp->iwarp_info;
1494 	tcp_info = &iwqp->tcp_info;
1495 	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1496 	ibdev_dbg(&iwdev->ibdev,
1497 		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1498 		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1499 		  iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1500 		  iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1501 
1502 	spin_lock_irqsave(&iwqp->lock, flags);
1503 	if (attr_mask & IB_QP_STATE) {
1504 		info.curr_iwarp_state = iwqp->iwarp_state;
1505 		switch (attr->qp_state) {
1506 		case IB_QPS_INIT:
1507 		case IB_QPS_RTR:
1508 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1509 				err = -EINVAL;
1510 				goto exit;
1511 			}
1512 
1513 			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1514 				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1515 				issue_modify_qp = 1;
1516 			}
1517 			if (iwdev->push_mode && udata &&
1518 			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1519 			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1520 				spin_unlock_irqrestore(&iwqp->lock, flags);
1521 				irdma_alloc_push_page(iwqp);
1522 				spin_lock_irqsave(&iwqp->lock, flags);
1523 			}
1524 			break;
1525 		case IB_QPS_RTS:
1526 			if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1527 			    !iwqp->cm_id) {
1528 				err = -EINVAL;
1529 				goto exit;
1530 			}
1531 
1532 			issue_modify_qp = 1;
1533 			iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1534 			iwqp->hte_added = 1;
1535 			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1536 			info.tcp_ctx_valid = true;
1537 			info.ord_valid = true;
1538 			info.arp_cache_idx_valid = true;
1539 			info.cq_num_valid = true;
1540 			break;
1541 		case IB_QPS_SQD:
1542 			if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1543 				err = 0;
1544 				goto exit;
1545 			}
1546 
1547 			if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1548 			    iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1549 				err = 0;
1550 				goto exit;
1551 			}
1552 
1553 			if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1554 				err = -EINVAL;
1555 				goto exit;
1556 			}
1557 
1558 			info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1559 			issue_modify_qp = 1;
1560 			break;
1561 		case IB_QPS_SQE:
1562 			if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1563 				err = -EINVAL;
1564 				goto exit;
1565 			}
1566 
1567 			info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1568 			issue_modify_qp = 1;
1569 			break;
1570 		case IB_QPS_ERR:
1571 		case IB_QPS_RESET:
1572 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1573 				spin_unlock_irqrestore(&iwqp->lock, flags);
1574 				if (udata && udata->inlen) {
1575 					if (ib_copy_from_udata(&ureq, udata,
1576 					    min(sizeof(ureq), udata->inlen)))
1577 						return -EINVAL;
1578 
1579 					irdma_flush_wqes(iwqp,
1580 					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1581 					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1582 					    IRDMA_REFLUSH);
1583 				}
1584 				return 0;
1585 			}
1586 
1587 			if (iwqp->sc_qp.term_flags) {
1588 				spin_unlock_irqrestore(&iwqp->lock, flags);
1589 				irdma_terminate_del_timer(&iwqp->sc_qp);
1590 				spin_lock_irqsave(&iwqp->lock, flags);
1591 			}
1592 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
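			/* reset the TCP connection unless it is already closed or in TIME_WAIT, or the link is down */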
1593 			if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1594 			    iwdev->iw_status &&
1595 			    iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1596 				info.reset_tcp_conn = true;
1597 			else
1598 				dont_wait = 1;
1599 
1600 			issue_modify_qp = 1;
1601 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1602 			break;
1603 		default:
1604 			err = -EINVAL;
1605 			goto exit;
1606 		}
1607 
1608 		iwqp->ibqp_state = attr->qp_state;
1609 	}
1610 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1611 		ctx_info->iwarp_info_valid = true;
1612 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1613 			offload_info->wr_rdresp_en = true;
1614 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1615 			offload_info->wr_rdresp_en = true;
1616 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1617 			offload_info->rd_en = true;
1618 	}
1619 
1620 	if (ctx_info->iwarp_info_valid) {
1621 		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1622 		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1623 		irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1624 	}
1625 	spin_unlock_irqrestore(&iwqp->lock, flags);
1626 
1627 	if (attr_mask & IB_QP_STATE) {
1628 		if (issue_modify_qp) {
1629 			ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1630 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1631 				return -EINVAL;
1632 		}
1633 
1634 		spin_lock_irqsave(&iwqp->lock, flags);
1635 		if (iwqp->iwarp_state == info.curr_iwarp_state) {
1636 			iwqp->iwarp_state = info.next_iwarp_state;
1637 			iwqp->ibqp_state = attr->qp_state;
1638 		}
1639 		spin_unlock_irqrestore(&iwqp->lock, flags);
1640 	}
1641 
1642 	if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1643 		if (dont_wait) {
1644 			if (iwqp->hw_tcp_state) {
1645 				spin_lock_irqsave(&iwqp->lock, flags);
1646 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1647 				iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1648 				spin_unlock_irqrestore(&iwqp->lock, flags);
1649 			}
1650 			irdma_cm_disconn(iwqp);
1651 		} else {
1652 			int close_timer_started;
1653 
1654 			spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1655 
1656 			if (iwqp->cm_node) {
1657 				refcount_inc(&iwqp->cm_node->refcnt);
1658 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1659 				close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1660 				if (iwqp->cm_id && close_timer_started == 1)
1661 					irdma_schedule_cm_timer(iwqp->cm_node,
1662 						(struct irdma_puda_buf *)iwqp,
1663 						IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1664 
1665 				irdma_rem_ref_cm_node(iwqp->cm_node);
1666 			} else {
1667 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1668 			}
1669 		}
1670 	}
1671 	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1672 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1673 		struct irdma_ucontext *ucontext;
1674 
1675 		ucontext = rdma_udata_to_drv_context(udata,
1676 					struct irdma_ucontext, ibucontext);
1677 		if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1678 		    !iwqp->push_wqe_mmap_entry &&
1679 		    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1680 			&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1681 			uresp.push_valid = 1;
1682 			uresp.push_offset = iwqp->sc_qp.push_offset;
1683 		}
1684 
1685 		err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1686 				       udata->outlen));
1687 		if (err) {
1688 			irdma_remove_push_mmap_entries(iwqp);
1689 			ibdev_dbg(&iwdev->ibdev,
1690 				  "VERBS: copy_to_udata failed\n");
1691 			return err;
1692 		}
1693 	}
1694 
1695 	return 0;
1696 exit:
1697 	spin_unlock_irqrestore(&iwqp->lock, flags);
1698 
1699 	return err;
1700 }
1701 
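/*
 * Note on the udata handling above: requests and responses are always
 * copied with min(sizeof(struct), udata->inlen/outlen) so that user-space
 * providers built against an older or newer ABI still interoperate. A
 * minimal sketch of the pattern, using a hypothetical request struct:
 *
 *	struct drv_req req = {};
 *
 *	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
 *		return -EINVAL;
 *
 * A short copy leaves the trailing (newer) fields zero-initialized.
 */
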
1702 /**
1703  * irdma_cq_free_rsrc - free up resources for cq
1704  * @rf: RDMA PCI function
1705  * @iwcq: cq ptr
1706  */
1707 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1708 {
1709 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1710 
1711 	if (!iwcq->user_mode) {
1712 		dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1713 				  iwcq->kmem.va, iwcq->kmem.pa);
1714 		iwcq->kmem.va = NULL;
1715 		dma_free_coherent(rf->sc_dev.hw->device,
1716 				  iwcq->kmem_shadow.size,
1717 				  iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1718 		iwcq->kmem_shadow.va = NULL;
1719 	}
1720 
1721 	irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1722 }
1723 
1724 /**
1725  * irdma_free_cqbuf - worker to free a cq buffer
1726  * @work: provides access to the cq buffer to free
1727  */
1728 static void irdma_free_cqbuf(struct work_struct *work)
1729 {
1730 	struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1731 
1732 	dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1733 			  cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1734 	cq_buf->kmem_buf.va = NULL;
1735 	kfree(cq_buf);
1736 }
1737 
1738 /**
1739  * irdma_process_resize_list - remove resized cq buffers from the resize_list
1740  * @iwcq: cq which owns the resize_list
1741  * @iwdev: irdma device
1742  * @lcqe_buf: the buffer where the last cqe is received
1743  */
1744 static int irdma_process_resize_list(struct irdma_cq *iwcq,
1745 				     struct irdma_device *iwdev,
1746 				     struct irdma_cq_buf *lcqe_buf)
1747 {
1748 	struct list_head *tmp_node, *list_node;
1749 	struct irdma_cq_buf *cq_buf;
1750 	int cnt = 0;
1751 
1752 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1753 		cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1754 		if (cq_buf == lcqe_buf)
1755 			return cnt;
1756 
1757 		list_del(&cq_buf->list);
1758 		queue_work(iwdev->cleanup_wq, &cq_buf->work);
1759 		cnt++;
1760 	}
1761 
1762 	return cnt;
1763 }
1764 
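/*
 * Illustrative note: a resized kernel CQ cannot free its old ring until
 * every CQE still sitting in it has been polled. irdma_resize_cq() parks
 * the old buffer on iwcq->resize_list, and once the poll path has drained
 * a buffer, irdma_process_resize_list() queues it on iwdev->cleanup_wq so
 * that irdma_free_cqbuf() releases the DMA memory outside the CQ lock.
 */
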
1765 /**
1766  * irdma_destroy_cq - destroy cq
1767  * @ib_cq: cq pointer
1768  * @udata: user data
1769  */
1770 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1771 {
1772 	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1773 	struct irdma_cq *iwcq = to_iwcq(ib_cq);
1774 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1775 	struct irdma_sc_dev *dev = cq->dev;
1776 	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1777 	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1778 	unsigned long flags;
1779 
1780 	spin_lock_irqsave(&iwcq->lock, flags);
1781 	if (!list_empty(&iwcq->cmpl_generated))
1782 		irdma_remove_cmpls_list(iwcq);
1783 	if (!list_empty(&iwcq->resize_list))
1784 		irdma_process_resize_list(iwcq, iwdev, NULL);
1785 	spin_unlock_irqrestore(&iwcq->lock, flags);
1786 
1787 	irdma_cq_wq_destroy(iwdev->rf, cq);
1788 
1789 	spin_lock_irqsave(&iwceq->ce_lock, flags);
1790 	irdma_sc_cleanup_ceqes(cq, ceq);
1791 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1792 	irdma_cq_free_rsrc(iwdev->rf, iwcq);
1793 
1794 	return 0;
1795 }
1796 
1797 /**
1798  * irdma_resize_cq - resize cq
1799  * @ibcq: cq to be resized
1800  * @entries: desired cq size
1801  * @udata: user data
1802  */
1803 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
1804 			   struct ib_udata *udata)
1805 {
1806 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
1807 	struct irdma_cq *iwcq = to_iwcq(ibcq);
1808 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1809 	struct irdma_cqp_request *cqp_request;
1810 	struct cqp_cmds_info *cqp_info;
1811 	struct irdma_modify_cq_info *m_info;
1812 	struct irdma_modify_cq_info info = {};
1813 	struct irdma_dma_mem kmem_buf;
1814 	struct irdma_cq_mr *cqmr_buf;
1815 	struct irdma_pbl *iwpbl_buf;
1816 	struct irdma_device *iwdev;
1817 	struct irdma_pci_f *rf;
1818 	struct irdma_cq_buf *cq_buf = NULL;
1819 	unsigned long flags;
1820 	int ret;
1821 
1822 	iwdev = to_iwdev(ibcq->device);
1823 	rf = iwdev->rf;
1824 
1825 	if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1826 	    IRDMA_FEATURE_CQ_RESIZE))
1827 		return -EOPNOTSUPP;
1828 
1829 	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
1830 		return -EINVAL;
1831 
1832 	if (entries > rf->max_cqe)
1833 		return -EINVAL;
1834 
1835 	if (!iwcq->user_mode) {
1836 		entries++;
1837 		if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1838 			entries *= 2;
1839 	}
1840 
1841 	info.cq_size = max(entries, 4);
1842 
1843 	if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1844 		return 0;
1845 
1846 	if (udata) {
1847 		struct irdma_resize_cq_req req = {};
1848 		struct irdma_ucontext *ucontext =
1849 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1850 						  ibucontext);
1851 
1852 		/* CQ resize not supported with legacy GEN_1 libi40iw */
1853 		if (ucontext->legacy_mode)
1854 			return -EOPNOTSUPP;
1855 
1856 		if (ib_copy_from_udata(&req, udata,
1857 				       min(sizeof(req), udata->inlen)))
1858 			return -EINVAL;
1859 
1860 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1861 		iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1862 					  &ucontext->cq_reg_mem_list);
1863 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1864 
1865 		if (!iwpbl_buf)
1866 			return -ENOMEM;
1867 
1868 		cqmr_buf = &iwpbl_buf->cq_mr;
1869 		if (iwpbl_buf->pbl_allocated) {
1870 			info.virtual_map = true;
1871 			info.pbl_chunk_size = 1;
1872 			info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1873 		} else {
1874 			info.cq_pa = cqmr_buf->cq_pbl.addr;
1875 		}
1876 	} else {
1877 		/* Kmode CQ resize */
1878 		int rsize;
1879 
1880 		rsize = info.cq_size * sizeof(struct irdma_cqe);
1881 		kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
1882 		kmem_buf.va = dma_alloc_coherent(dev->hw->device,
1883 						 kmem_buf.size, &kmem_buf.pa,
1884 						 GFP_KERNEL);
1885 		if (!kmem_buf.va)
1886 			return -ENOMEM;
1887 
1888 		info.cq_base = kmem_buf.va;
1889 		info.cq_pa = kmem_buf.pa;
1890 		cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1891 		if (!cq_buf) {
1892 			ret = -ENOMEM;
1893 			goto error;
1894 		}
1895 	}
1896 
1897 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1898 	if (!cqp_request) {
1899 		ret = -ENOMEM;
1900 		goto error;
1901 	}
1902 
1903 	info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1904 	info.cq_resize = true;
1905 
1906 	cqp_info = &cqp_request->info;
1907 	m_info = &cqp_info->in.u.cq_modify.info;
1908 	memcpy(m_info, &info, sizeof(*m_info));
1909 
1910 	cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1911 	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1912 	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1913 	cqp_info->post_sq = 1;
1914 	ret = irdma_handle_cqp_op(rf, cqp_request);
1915 	irdma_put_cqp_request(&rf->cqp, cqp_request);
1916 	if (ret)
1917 		goto error;
1918 
1919 	spin_lock_irqsave(&iwcq->lock, flags);
1920 	if (cq_buf) {
1921 		cq_buf->kmem_buf = iwcq->kmem;
1922 		cq_buf->hw = dev->hw;
1923 		memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1924 		INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1925 		list_add_tail(&cq_buf->list, &iwcq->resize_list);
1926 		iwcq->kmem = kmem_buf;
1927 	}
1928 
1929 	irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1930 	ibcq->cqe = info.cq_size - 1;
1931 	spin_unlock_irqrestore(&iwcq->lock, flags);
1932 
1933 	return 0;
1934 error:
1935 	if (!udata) {
1936 		dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
1937 				  kmem_buf.pa);
1938 		kmem_buf.va = NULL;
1939 	}
1940 	kfree(cq_buf);
1941 
1942 	return ret;
1943 }
1944 
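/*
 * Worked example (illustrative) of the kernel-mode sizing above: a request
 * to resize to 128 entries on GEN_2 hardware becomes
 * info.cq_size = max((128 + 1) * 2, 4) = 258 CQEs, and ibcq->cqe then
 * reports 257 back to the caller.
 */
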
1945 static inline int cq_validate_flags(u32 flags, u8 hw_rev)
1946 {
1947 	/* GEN1 does not support CQ create flags */
1948 	if (hw_rev == IRDMA_GEN_1)
1949 		return flags ? -EOPNOTSUPP : 0;
1950 
1951 	return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
1952 }
1953 
1954 /**
1955  * irdma_create_cq - create cq
1956  * @ibcq: CQ allocated
1957  * @attr: attributes for cq
1958  * @udata: user data
1959  */
1960 static int irdma_create_cq(struct ib_cq *ibcq,
1961 			   const struct ib_cq_init_attr *attr,
1962 			   struct ib_udata *udata)
1963 {
1964 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
1965 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
1966 	struct ib_device *ibdev = ibcq->device;
1967 	struct irdma_device *iwdev = to_iwdev(ibdev);
1968 	struct irdma_pci_f *rf = iwdev->rf;
1969 	struct irdma_cq *iwcq = to_iwcq(ibcq);
1970 	u32 cq_num = 0;
1971 	struct irdma_sc_cq *cq;
1972 	struct irdma_sc_dev *dev = &rf->sc_dev;
1973 	struct irdma_cq_init_info info = {};
1974 	struct irdma_cqp_request *cqp_request;
1975 	struct cqp_cmds_info *cqp_info;
1976 	struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1977 	unsigned long flags;
1978 	int err_code;
1979 	int entries = attr->cqe;
1980 
1981 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1982 	if (err_code)
1983 		return err_code;
1984 
1985 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
1986 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
1987 		return -EINVAL;
1988 
1989 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
1990 				    &rf->next_cq);
1991 	if (err_code)
1992 		return err_code;
1993 
1994 	cq = &iwcq->sc_cq;
1995 	cq->back_cq = iwcq;
1996 	spin_lock_init(&iwcq->lock);
1997 	INIT_LIST_HEAD(&iwcq->resize_list);
1998 	INIT_LIST_HEAD(&iwcq->cmpl_generated);
1999 	info.dev = dev;
2000 	ukinfo->cq_size = max(entries, 4);
2001 	ukinfo->cq_id = cq_num;
2002 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
2003 	if (attr->comp_vector < rf->ceqs_count)
2004 		info.ceq_id = attr->comp_vector;
2005 	info.ceq_id_valid = true;
2006 	info.ceqe_mask = 1;
2007 	info.type = IRDMA_CQ_TYPE_IWARP;
2008 	info.vsi = &iwdev->vsi;
2009 
2010 	if (udata) {
2011 		struct irdma_ucontext *ucontext;
2012 		struct irdma_create_cq_req req = {};
2013 		struct irdma_cq_mr *cqmr;
2014 		struct irdma_pbl *iwpbl;
2015 		struct irdma_pbl *iwpbl_shadow;
2016 		struct irdma_cq_mr *cqmr_shadow;
2017 
2018 		iwcq->user_mode = true;
2019 		ucontext =
2020 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2021 						  ibucontext);
2022 		if (ib_copy_from_udata(&req, udata,
2023 				       min(sizeof(req), udata->inlen))) {
2024 			err_code = -EFAULT;
2025 			goto cq_free_rsrc;
2026 		}
2027 
2028 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2029 		iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
2030 				      &ucontext->cq_reg_mem_list);
2031 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2032 		if (!iwpbl) {
2033 			err_code = -EPROTO;
2034 			goto cq_free_rsrc;
2035 		}
2036 
2037 		iwcq->iwpbl = iwpbl;
2038 		iwcq->cq_mem_size = 0;
2039 		cqmr = &iwpbl->cq_mr;
2040 
2041 		if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2042 		    IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2043 			spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2044 			iwpbl_shadow = irdma_get_pbl(
2045 					(unsigned long)req.user_shadow_area,
2046 					&ucontext->cq_reg_mem_list);
2047 			spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2048 
2049 			if (!iwpbl_shadow) {
2050 				err_code = -EPROTO;
2051 				goto cq_free_rsrc;
2052 			}
2053 			iwcq->iwpbl_shadow = iwpbl_shadow;
2054 			cqmr_shadow = &iwpbl_shadow->cq_mr;
2055 			info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2056 			cqmr->split = true;
2057 		} else {
2058 			info.shadow_area_pa = cqmr->shadow;
2059 		}
2060 		if (iwpbl->pbl_allocated) {
2061 			info.virtual_map = true;
2062 			info.pbl_chunk_size = 1;
2063 			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2064 		} else {
2065 			info.cq_base_pa = cqmr->cq_pbl.addr;
2066 		}
2067 	} else {
2068 		/* Kmode allocations */
2069 		int rsize;
2070 
2071 		if (entries < 1 || entries > rf->max_cqe) {
2072 			err_code = -EINVAL;
2073 			goto cq_free_rsrc;
2074 		}
2075 
2076 		entries++;
2077 		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2078 			entries *= 2;
2079 		ukinfo->cq_size = entries;
2080 
2081 		rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2082 		iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2083 		iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2084 						   iwcq->kmem.size,
2085 						   &iwcq->kmem.pa, GFP_KERNEL);
2086 		if (!iwcq->kmem.va) {
2087 			err_code = -ENOMEM;
2088 			goto cq_free_rsrc;
2089 		}
2090 
2091 		iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2092 					       64);
2093 		iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2094 							  iwcq->kmem_shadow.size,
2095 							  &iwcq->kmem_shadow.pa,
2096 							  GFP_KERNEL);
2097 		if (!iwcq->kmem_shadow.va) {
2098 			err_code = -ENOMEM;
2099 			goto cq_free_rsrc;
2100 		}
2101 		info.shadow_area_pa = iwcq->kmem_shadow.pa;
2102 		ukinfo->shadow_area = iwcq->kmem_shadow.va;
2103 		ukinfo->cq_base = iwcq->kmem.va;
2104 		info.cq_base_pa = iwcq->kmem.pa;
2105 	}
2106 
2107 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2108 		info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2109 						 (u32)IRDMA_MAX_CQ_READ_THRESH);
2110 
2111 	if (irdma_sc_cq_init(cq, &info)) {
2112 		ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2113 		err_code = -EPROTO;
2114 		goto cq_free_rsrc;
2115 	}
2116 
2117 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2118 	if (!cqp_request) {
2119 		err_code = -ENOMEM;
2120 		goto cq_free_rsrc;
2121 	}
2122 
2123 	cqp_info = &cqp_request->info;
2124 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2125 	cqp_info->post_sq = 1;
2126 	cqp_info->in.u.cq_create.cq = cq;
2127 	cqp_info->in.u.cq_create.check_overflow = true;
2128 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2129 	err_code = irdma_handle_cqp_op(rf, cqp_request);
2130 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2131 	if (err_code)
2132 		goto cq_free_rsrc;
2133 
2134 	if (udata) {
2135 		struct irdma_create_cq_resp resp = {};
2136 
2137 		resp.cq_id = info.cq_uk_init_info.cq_id;
2138 		resp.cq_size = info.cq_uk_init_info.cq_size;
2139 		if (ib_copy_to_udata(udata, &resp,
2140 				     min(sizeof(resp), udata->outlen))) {
2141 			ibdev_dbg(&iwdev->ibdev,
2142 				  "VERBS: copy to user data\n");
2143 			err_code = -EPROTO;
2144 			goto cq_destroy;
2145 		}
2146 	}
2147 	return 0;
2148 cq_destroy:
2149 	irdma_cq_wq_destroy(rf, cq);
2150 cq_free_rsrc:
2151 	irdma_cq_free_rsrc(rf, iwcq);
2152 
2153 	return err_code;
2154 }
2155 
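/*
 * Illustrative arithmetic for the GEN_2 shadow read threshold set above:
 * a 512-entry CQ computes min(512 / 2, IRDMA_MAX_CQ_READ_THRESH), i.e.
 * half the ring unless that exceeds the IRDMA_MAX_CQ_READ_THRESH cap.
 */
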
2156 /**
2157  * irdma_get_mr_access - get hw MR access permissions from IB access flags
2158  * @access: IB access flags
2159  */
2160 static inline u16 irdma_get_mr_access(int access)
2161 {
2162 	u16 hw_access = 0;
2163 
2164 	hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2165 		     IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2166 	hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2167 		     IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2168 	hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2169 		     IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2170 	hw_access |= (access & IB_ACCESS_MW_BIND) ?
2171 		     IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2172 	hw_access |= (access & IB_ZERO_BASED) ?
2173 		     IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
2174 	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
2175 
2176 	return hw_access;
2177 }
2178 
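/*
 * Example (illustrative) of the translation above: registering with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ yields
 * IRDMA_ACCESS_FLAGS_LOCALWRITE | IRDMA_ACCESS_FLAGS_REMOTEREAD |
 * IRDMA_ACCESS_FLAGS_LOCALREAD, since local read is always granted.
 */
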
2179 /**
2180  * irdma_free_stag - free stag resource
2181  * @iwdev: irdma device
2182  * @stag: stag to free
2183  */
2184 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2185 {
2186 	u32 stag_idx;
2187 
2188 	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2189 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2190 }
2191 
2192 /**
2193  * irdma_create_stag - create random stag
2194  * @iwdev: irdma device
2195  */
2196 static u32 irdma_create_stag(struct irdma_device *iwdev)
2197 {
2198 	u32 stag = 0;
2199 	u32 stag_index = 0;
2200 	u32 next_stag_index;
2201 	u32 driver_key;
2202 	u32 random;
2203 	u8 consumer_key;
2204 	int ret;
2205 
2206 	get_random_bytes(&random, sizeof(random));
2207 	consumer_key = (u8)random;
2208 
2209 	driver_key = random & ~iwdev->rf->mr_stagmask;
2210 	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2211 	next_stag_index %= iwdev->rf->max_mr;
2212 
2213 	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2214 			       iwdev->rf->max_mr, &stag_index,
2215 			       &next_stag_index);
2216 	if (ret)
2217 		return stag;
2218 	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2219 	stag |= driver_key;
2220 	stag += (u32)consumer_key;
2221 
2222 	return stag;
2223 }
2224 
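/*
 * Illustrative breakdown of the stag composed above, assuming the usual
 * STag layout where the index sits in the bits covered by mr_stagmask and
 * the low byte acts as the consumer key:
 *
 *	stag  = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
 *	stag |= driver_key;
 *	stag += (u32)consumer_key;
 *
 * The key portions come from get_random_bytes(), presumably so a recycled
 * index does not reproduce the previous key.
 */
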
2225 /**
2226  * irdma_next_pbl_addr - Get next pbl address
2227  * @pbl: pointer to a pble
2228  * @pinfo: info pointer
2229  * @idx: index
2230  */
2231 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2232 				       u32 *idx)
2233 {
2234 	*idx += 1;
2235 	if (!(*pinfo) || *idx != (*pinfo)->cnt)
2236 		return ++pbl;
2237 	*idx = 0;
2238 	(*pinfo)++;
2239 
2240 	return (*pinfo)->addr;
2241 }
2242 
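/*
 * Illustrative note: level-2 PBLE allocations are a chain of leaf chunks.
 * irdma_next_pbl_addr() walks within the current chunk and, once *idx
 * reaches the chunk's cnt, resets the index, advances pinfo to the next
 * leaf and returns that leaf's first address.
 */
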
2243 /**
2244  * irdma_copy_user_pgaddrs - copy user page addresses into the local pbles
2245  * @iwmr: iwmr for IB's user page addresses
2246  * @pbl: pbl pointer to save level 1 or level 0 pble
2247  * @level: indicates level 0, 1 or 2
2248  */
2249 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2250 				    enum irdma_pble_level level)
2251 {
2252 	struct ib_umem *region = iwmr->region;
2253 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2254 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2255 	struct irdma_pble_info *pinfo;
2256 	struct ib_block_iter biter;
2257 	u32 idx = 0;
2258 	u32 pbl_cnt = 0;
2259 
2260 	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2261 
2262 	if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2263 		iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2264 
2265 	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2266 		*pbl = rdma_block_iter_dma_address(&biter);
2267 		if (++pbl_cnt == palloc->total_cnt)
2268 			break;
2269 		pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
2270 	}
2271 }
2272 
2273 /**
2274  * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
2275  * @arr: lvl1 pbl array
2276  * @npages: page count
2277  * @pg_size: page size
2278  *
2279  */
2280 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2281 {
2282 	u32 pg_idx;
2283 
2284 	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2285 		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
2286 			return false;
2287 	}
2288 
2289 	return true;
2290 }
2291 
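/*
 * Worked example (illustrative): with pg_size = 4096 and
 * arr = { 0x10000, 0x11000, 0x12000 } every entry equals
 * arr[0] + pg_idx * pg_size, so the check above returns true; replacing
 * the last entry with 0x13000 would make it return false.
 */
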
2292 /**
2293  * irdma_check_mr_contiguous - check if MR is physically contiguous
2294  * @palloc: pbl allocation struct
2295  * @pg_size: page size
2296  */
2297 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2298 				      u32 pg_size)
2299 {
2300 	struct irdma_pble_level2 *lvl2 = &palloc->level2;
2301 	struct irdma_pble_info *leaf = lvl2->leaf;
2302 	u64 *arr = NULL;
2303 	u64 *start_addr = NULL;
2304 	int i;
2305 	bool ret;
2306 
2307 	if (palloc->level == PBLE_LEVEL_1) {
2308 		arr = palloc->level1.addr;
2309 		ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2310 						 pg_size);
2311 		return ret;
2312 	}
2313 
2314 	start_addr = leaf->addr;
2315 
2316 	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2317 		arr = leaf->addr;
2318 		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2319 			return false;
2320 		ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2321 		if (!ret)
2322 			return false;
2323 	}
2324 
2325 	return true;
2326 }
2327 
2328 /**
2329  * irdma_setup_pbles - copy user page addresses to pbles
2330  * @rf: RDMA PCI function
2331  * @iwmr: mr pointer for this memory registration
2332  * @use_pbles: flag to indicate if pbles are used
2333  * @lvl_1_only: request only level 1 pble if true
2334  */
2335 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2336 			     bool use_pbles, bool lvl_1_only)
2337 {
2338 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2339 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2340 	struct irdma_pble_info *pinfo;
2341 	u64 *pbl;
2342 	int status;
2343 	enum irdma_pble_level level = PBLE_LEVEL_1;
2344 
2345 	if (use_pbles) {
2346 		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2347 					lvl_1_only);
2348 		if (status)
2349 			return status;
2350 
2351 		iwpbl->pbl_allocated = true;
2352 		level = palloc->level;
2353 		pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2354 						  palloc->level2.leaf;
2355 		pbl = pinfo->addr;
2356 	} else {
2357 		pbl = iwmr->pgaddrmem;
2358 	}
2359 
2360 	irdma_copy_user_pgaddrs(iwmr, pbl, level);
2361 
2362 	if (use_pbles)
2363 		iwmr->pgaddrmem[0] = *pbl;
2364 
2365 	return 0;
2366 }
2367 
2368 /**
2369  * irdma_handle_q_mem - handle memory for qp and cq
2370  * @iwdev: irdma device
2371  * @req: information for q memory management
2372  * @iwpbl: pble struct
2373  * @use_pbles: flag to use pble
2374  */
2375 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2376 			      struct irdma_mem_reg_req *req,
2377 			      struct irdma_pbl *iwpbl, bool use_pbles)
2378 {
2379 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2380 	struct irdma_mr *iwmr = iwpbl->iwmr;
2381 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2382 	struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2383 	struct irdma_hmc_pble *hmc_p;
2384 	u64 *arr = iwmr->pgaddrmem;
2385 	u32 pg_size, total;
2386 	int err = 0;
2387 	bool ret = true;
2388 
2389 	pg_size = iwmr->page_size;
2390 	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true);
2391 	if (err)
2392 		return err;
2393 
2394 	if (use_pbles)
2395 		arr = palloc->level1.addr;
2396 
2397 	switch (iwmr->type) {
2398 	case IRDMA_MEMREG_TYPE_QP:
2399 		total = req->sq_pages + req->rq_pages;
2400 		hmc_p = &qpmr->sq_pbl;
2401 		qpmr->shadow = (dma_addr_t)arr[total];
2402 
2403 		if (use_pbles) {
2404 			ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2405 							 pg_size);
2406 			if (ret)
2407 				ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2408 								 req->rq_pages,
2409 								 pg_size);
2410 		}
2411 
2412 		if (!ret) {
2413 			hmc_p->idx = palloc->level1.idx;
2414 			hmc_p = &qpmr->rq_pbl;
2415 			hmc_p->idx = palloc->level1.idx + req->sq_pages;
2416 		} else {
2417 			hmc_p->addr = arr[0];
2418 			hmc_p = &qpmr->rq_pbl;
2419 			hmc_p->addr = arr[req->sq_pages];
2420 		}
2421 		break;
2422 	case IRDMA_MEMREG_TYPE_CQ:
2423 		hmc_p = &cqmr->cq_pbl;
2424 
2425 		if (!cqmr->split)
2426 			cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2427 
2428 		if (use_pbles)
2429 			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2430 							 pg_size);
2431 
2432 		if (!ret)
2433 			hmc_p->idx = palloc->level1.idx;
2434 		else
2435 			hmc_p->addr = arr[0];
2436 		break;
2437 	default:
2438 		ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2439 		err = -EINVAL;
2440 	}
2441 
2442 	if (use_pbles && ret) {
2443 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2444 		iwpbl->pbl_allocated = false;
2445 	}
2446 
2447 	return err;
2448 }
2449 
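/*
 * Illustrative layout for the IRDMA_MEMREG_TYPE_QP case above: the user
 * region is ordered [sq_pages][rq_pages][shadow page], so with sq_pages = 4
 * and rq_pages = 2 the shadow address is arr[6] and the RQ ring starts at
 * arr[4] (or at palloc->level1.idx + 4 when PBLEs are in use).
 */
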
2450 /**
2451  * irdma_hw_alloc_mw - create the hw memory window
2452  * @iwdev: irdma device
2453  * @iwmr: pointer to memory window info
2454  */
2455 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2456 {
2457 	struct irdma_mw_alloc_info *info;
2458 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2459 	struct irdma_cqp_request *cqp_request;
2460 	struct cqp_cmds_info *cqp_info;
2461 	int status;
2462 
2463 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2464 	if (!cqp_request)
2465 		return -ENOMEM;
2466 
2467 	cqp_info = &cqp_request->info;
2468 	info = &cqp_info->in.u.mw_alloc.info;
2469 	memset(info, 0, sizeof(*info));
2470 	if (iwmr->ibmw.type == IB_MW_TYPE_1)
2471 		info->mw_wide = true;
2472 
2473 	info->page_size = PAGE_SIZE;
2474 	info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2475 	info->pd_id = iwpd->sc_pd.pd_id;
2476 	info->remote_access = true;
2477 	cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2478 	cqp_info->post_sq = 1;
2479 	cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2480 	cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2481 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2482 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2483 
2484 	return status;
2485 }
2486 
2487 /**
2488  * irdma_alloc_mw - Allocate memory window
2489  * @ibmw: Memory Window
2490  * @udata: user data pointer
2491  */
2492 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2493 {
2494 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
2495 	struct irdma_mr *iwmr = to_iwmw(ibmw);
2496 	int err_code;
2497 	u32 stag;
2498 
2499 	stag = irdma_create_stag(iwdev);
2500 	if (!stag)
2501 		return -ENOMEM;
2502 
2503 	iwmr->stag = stag;
2504 	ibmw->rkey = stag;
2505 
2506 	err_code = irdma_hw_alloc_mw(iwdev, iwmr);
2507 	if (err_code) {
2508 		irdma_free_stag(iwdev, stag);
2509 		return err_code;
2510 	}
2511 
2512 	return 0;
2513 }
2514 
2515 /**
2516  * irdma_dealloc_mw - Dealloc memory window
2517  * @ibmw: memory window structure.
2518  */
2519 static int irdma_dealloc_mw(struct ib_mw *ibmw)
2520 {
2521 	struct ib_pd *ibpd = ibmw->pd;
2522 	struct irdma_pd *iwpd = to_iwpd(ibpd);
2523 	struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2524 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
2525 	struct irdma_cqp_request *cqp_request;
2526 	struct cqp_cmds_info *cqp_info;
2527 	struct irdma_dealloc_stag_info *info;
2528 
2529 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2530 	if (!cqp_request)
2531 		return -ENOMEM;
2532 
2533 	cqp_info = &cqp_request->info;
2534 	info = &cqp_info->in.u.dealloc_stag.info;
2535 	memset(info, 0, sizeof(*info));
2536 	info->pd_id = iwpd->sc_pd.pd_id;
2537 	info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
2538 	info->mr = false;
2539 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2540 	cqp_info->post_sq = 1;
2541 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2542 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2543 	irdma_handle_cqp_op(iwdev->rf, cqp_request);
2544 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2545 	irdma_free_stag(iwdev, iwmr->stag);
2546 
2547 	return 0;
2548 }
2549 
2550 /**
2551  * irdma_hw_alloc_stag - cqp command to allocate stag
2552  * @iwdev: irdma device
2553  * @iwmr: irdma mr pointer
2554  */
2555 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
2556 			       struct irdma_mr *iwmr)
2557 {
2558 	struct irdma_allocate_stag_info *info;
2559 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2560 	int status;
2561 	struct irdma_cqp_request *cqp_request;
2562 	struct cqp_cmds_info *cqp_info;
2563 
2564 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2565 	if (!cqp_request)
2566 		return -ENOMEM;
2567 
2568 	cqp_info = &cqp_request->info;
2569 	info = &cqp_info->in.u.alloc_stag.info;
2570 	memset(info, 0, sizeof(*info));
2571 	info->page_size = PAGE_SIZE;
2572 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2573 	info->pd_id = iwpd->sc_pd.pd_id;
2574 	info->total_len = iwmr->len;
2575 	info->remote_access = true;
2576 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2577 	cqp_info->post_sq = 1;
2578 	cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2579 	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2580 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2581 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2582 
2583 	return status;
2584 }
2585 
2586 /**
2587  * irdma_alloc_mr - register stag for fast memory registration
2588  * @pd: ibpd pointer
2589  * @mr_type: memory reg type for stag registration
2590  * @max_num_sg: max number of pages
2591  */
2592 static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2593 				    u32 max_num_sg)
2594 {
2595 	struct irdma_device *iwdev = to_iwdev(pd->device);
2596 	struct irdma_pble_alloc *palloc;
2597 	struct irdma_pbl *iwpbl;
2598 	struct irdma_mr *iwmr;
2599 	u32 stag;
2600 	int err_code;
2601 
2602 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2603 	if (!iwmr)
2604 		return ERR_PTR(-ENOMEM);
2605 
2606 	stag = irdma_create_stag(iwdev);
2607 	if (!stag) {
2608 		err_code = -ENOMEM;
2609 		goto err;
2610 	}
2611 
2612 	iwmr->stag = stag;
2613 	iwmr->ibmr.rkey = stag;
2614 	iwmr->ibmr.lkey = stag;
2615 	iwmr->ibmr.pd = pd;
2616 	iwmr->ibmr.device = pd->device;
2617 	iwpbl = &iwmr->iwpbl;
2618 	iwpbl->iwmr = iwmr;
2619 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2620 	palloc = &iwpbl->pble_alloc;
2621 	iwmr->page_cnt = max_num_sg;
2622 	err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
2623 				  false);
2624 	if (err_code)
2625 		goto err_get_pble;
2626 
2627 	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
2628 	if (err_code)
2629 		goto err_alloc_stag;
2630 
2631 	iwpbl->pbl_allocated = true;
2632 
2633 	return &iwmr->ibmr;
2634 err_alloc_stag:
2635 	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2636 err_get_pble:
2637 	irdma_free_stag(iwdev, stag);
2638 err:
2639 	kfree(iwmr);
2640 
2641 	return ERR_PTR(err_code);
2642 }
2643 
2644 /**
2645  * irdma_set_page - populate pbl list for fmr
2646  * @ibmr: ib mem to access iwarp mr pointer
2647  * @addr: page dma address for pbl list
2648  */
2649 static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
2650 {
2651 	struct irdma_mr *iwmr = to_iwmr(ibmr);
2652 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2653 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2654 	u64 *pbl;
2655 
2656 	if (unlikely(iwmr->npages == iwmr->page_cnt))
2657 		return -ENOMEM;
2658 
2659 	if (palloc->level == PBLE_LEVEL_2) {
2660 		struct irdma_pble_info *palloc_info =
2661 			palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
2662 
2663 		palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
2664 	} else {
2665 		pbl = palloc->level1.addr;
2666 		pbl[iwmr->npages] = addr;
2667 	}
2668 	iwmr->npages++;
2669 
2670 	return 0;
2671 }
2672 
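/*
 * Worked example (illustrative, assuming PBLE_PER_PAGE is 512 and
 * PBLE_512_SHIFT is 9): for iwmr->npages = 1000 the level-2 path above
 * writes leaf (1000 >> 9) = 1 at slot (1000 & 511) = 488, while a level-1
 * allocation simply writes pbl[1000].
 */
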
2673 /**
2674  * irdma_map_mr_sg - map sg list for fmr
2675  * @ibmr: ib mem to access iwarp mr pointer
2676  * @sg: scatter gather list
2677  * @sg_nents: number of sg entries
2678  * @sg_offset: pointer to sg list offset
2679  */
2680 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2681 			   int sg_nents, unsigned int *sg_offset)
2682 {
2683 	struct irdma_mr *iwmr = to_iwmr(ibmr);
2684 
2685 	iwmr->npages = 0;
2686 
2687 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
2688 }
2689 
2690 /**
2691  * irdma_hwreg_mr - send cqp command for memory registration
2692  * @iwdev: irdma device
2693  * @iwmr: irdma mr pointer
2694  * @access: access for MR
2695  */
2696 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2697 			  u16 access)
2698 {
2699 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2700 	struct irdma_reg_ns_stag_info *stag_info;
2701 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2702 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2703 	struct irdma_cqp_request *cqp_request;
2704 	struct cqp_cmds_info *cqp_info;
2705 	int ret;
2706 
2707 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2708 	if (!cqp_request)
2709 		return -ENOMEM;
2710 
2711 	cqp_info = &cqp_request->info;
2712 	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2713 	memset(stag_info, 0, sizeof(*stag_info));
2714 	stag_info->va = iwpbl->user_base;
2715 	stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2716 	stag_info->stag_key = (u8)iwmr->stag;
2717 	stag_info->total_len = iwmr->len;
2718 	stag_info->access_rights = irdma_get_mr_access(access);
2719 	stag_info->pd_id = iwpd->sc_pd.pd_id;
2720 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2721 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2722 	else
2723 		stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2724 	stag_info->page_size = iwmr->page_size;
2725 
2726 	if (iwpbl->pbl_allocated) {
2727 		if (palloc->level == PBLE_LEVEL_1) {
2728 			stag_info->first_pm_pbl_index = palloc->level1.idx;
2729 			stag_info->chunk_size = 1;
2730 		} else {
2731 			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2732 			stag_info->chunk_size = 3;
2733 		}
2734 	} else {
2735 		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2736 	}
2737 
2738 	cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2739 	cqp_info->post_sq = 1;
2740 	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2741 	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2742 	ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2743 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2744 
2745 	return ret;
2746 }
2747 
2748 static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
2749 {
2750 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2751 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2752 	bool use_pbles;
2753 	u32 stag;
2754 	int err;
2755 
2756 	use_pbles = iwmr->page_cnt != 1;
2757 
2758 	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
2759 	if (err)
2760 		return err;
2761 
2762 	if (use_pbles) {
2763 		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
2764 						iwmr->page_size);
2765 		if (err) {
2766 			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2767 			iwpbl->pbl_allocated = false;
2768 		}
2769 	}
2770 
2771 	stag = irdma_create_stag(iwdev);
2772 	if (!stag) {
2773 		err = -ENOMEM;
2774 		goto free_pble;
2775 	}
2776 
2777 	iwmr->stag = stag;
2778 	iwmr->ibmr.rkey = stag;
2779 	iwmr->ibmr.lkey = stag;
2780 	err = irdma_hwreg_mr(iwdev, iwmr, access);
2781 	if (err)
2782 		goto err_hwreg;
2783 
2784 	return 0;
2785 
2786 err_hwreg:
2787 	irdma_free_stag(iwdev, stag);
2788 
2789 free_pble:
2790 	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2791 		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2792 
2793 	return err;
2794 }
2795 
2796 static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
2797 					 struct ib_pd *pd, u64 virt,
2798 					 enum irdma_memreg_type reg_type)
2799 {
2800 	struct irdma_device *iwdev = to_iwdev(pd->device);
2801 	struct irdma_pbl *iwpbl = NULL;
2802 	struct irdma_mr *iwmr = NULL;
2803 	unsigned long pgsz_bitmap;
2804 
2805 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2806 	if (!iwmr)
2807 		return ERR_PTR(-ENOMEM);
2808 
2809 	iwpbl = &iwmr->iwpbl;
2810 	iwpbl->iwmr = iwmr;
2811 	iwmr->region = region;
2812 	iwmr->ibmr.pd = pd;
2813 	iwmr->ibmr.device = pd->device;
2814 	iwmr->ibmr.iova = virt;
2815 	iwmr->type = reg_type;
2816 
2817 	pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
2818 		iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
2819 
2820 	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
2821 	if (unlikely(!iwmr->page_size)) {
2822 		kfree(iwmr);
2823 		return ERR_PTR(-EOPNOTSUPP);
2824 	}
2825 
2826 	iwmr->len = region->length;
2827 	iwpbl->user_base = virt;
2828 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
2829 
2830 	return iwmr;
2831 }
2832 
2833 static void irdma_free_iwmr(struct irdma_mr *iwmr)
2834 {
2835 	kfree(iwmr);
2836 }
2837 
2838 static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
2839 				     struct ib_udata *udata,
2840 				     struct irdma_mr *iwmr)
2841 {
2842 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2843 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2844 	struct irdma_ucontext *ucontext = NULL;
2845 	unsigned long flags;
2846 	bool use_pbles;
2847 	u32 total;
2848 	int err;
2849 
2850 	total = req.sq_pages + req.rq_pages + 1;
2851 	if (total > iwmr->page_cnt)
2852 		return -EINVAL;
2853 
2854 	total = req.sq_pages + req.rq_pages;
2855 	use_pbles = total > 2;
2856 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2857 	if (err)
2858 		return err;
2859 
2860 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2861 					     ibucontext);
2862 	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2863 	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2864 	iwpbl->on_list = true;
2865 	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2866 
2867 	return 0;
2868 }
2869 
2870 static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
2871 				     struct ib_udata *udata,
2872 				     struct irdma_mr *iwmr)
2873 {
2874 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2875 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2876 	struct irdma_ucontext *ucontext = NULL;
2877 	u8 shadow_pgcnt = 1;
2878 	unsigned long flags;
2879 	bool use_pbles;
2880 	u32 total;
2881 	int err;
2882 
2883 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2884 		shadow_pgcnt = 0;
2885 	total = req.cq_pages + shadow_pgcnt;
2886 	if (total > iwmr->page_cnt)
2887 		return -EINVAL;
2888 
2889 	use_pbles = req.cq_pages > 1;
2890 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2891 	if (err)
2892 		return err;
2893 
2894 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2895 					     ibucontext);
2896 	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2897 	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2898 	iwpbl->on_list = true;
2899 	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2900 
2901 	return 0;
2902 }
2903 
2904 /**
2905  * irdma_reg_user_mr - Register a user memory region
2906  * @pd: ptr of pd
2907  * @start: virtual start address
2908  * @len: length of mr
2909  * @virt: virtual address
2910  * @access: access of mr
2911  * @udata: user data
2912  */
2913 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2914 				       u64 virt, int access,
2915 				       struct ib_udata *udata)
2916 {
2917 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
2918 	struct irdma_device *iwdev = to_iwdev(pd->device);
2919 	struct irdma_mem_reg_req req = {};
2920 	struct ib_umem *region = NULL;
2921 	struct irdma_mr *iwmr = NULL;
2922 	int err;
2923 
2924 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2925 		return ERR_PTR(-EINVAL);
2926 
2927 	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
2928 		return ERR_PTR(-EINVAL);
2929 
2930 	region = ib_umem_get(pd->device, start, len, access);
2931 
2932 	if (IS_ERR(region)) {
2933 		ibdev_dbg(&iwdev->ibdev,
2934 			  "VERBS: Failed to create ib_umem region\n");
2935 		return (struct ib_mr *)region;
2936 	}
2937 
2938 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
2939 		ib_umem_release(region);
2940 		return ERR_PTR(-EFAULT);
2941 	}
2942 
2943 	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
2944 	if (IS_ERR(iwmr)) {
2945 		ib_umem_release(region);
2946 		return (struct ib_mr *)iwmr;
2947 	}
2948 
2949 	switch (req.reg_type) {
2950 	case IRDMA_MEMREG_TYPE_QP:
2951 		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
2952 		if (err)
2953 			goto error;
2954 
2955 		break;
2956 	case IRDMA_MEMREG_TYPE_CQ:
2957 		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
2958 		if (err)
2959 			goto error;
2960 		break;
2961 	case IRDMA_MEMREG_TYPE_MEM:
2962 		err = irdma_reg_user_mr_type_mem(iwmr, access);
2963 		if (err)
2964 			goto error;
2965 
2966 		break;
2967 	default:
2968 		err = -EINVAL;
2969 		goto error;
2970 	}
2971 
2972 	return &iwmr->ibmr;
2973 error:
2974 	ib_umem_release(region);
2975 	irdma_free_iwmr(iwmr);
2976 
2977 	return ERR_PTR(err);
2978 }
2979 
2980 static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
2981 					      u64 len, u64 virt,
2982 					      int fd, int access,
2983 					      struct ib_udata *udata)
2984 {
2985 	struct irdma_device *iwdev = to_iwdev(pd->device);
2986 	struct ib_umem_dmabuf *umem_dmabuf;
2987 	struct irdma_mr *iwmr;
2988 	int err;
2989 
2990 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2991 		return ERR_PTR(-EINVAL);
2992 
2993 	umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
2994 	if (IS_ERR(umem_dmabuf)) {
2995 		err = PTR_ERR(umem_dmabuf);
2996 		ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
2997 		return ERR_PTR(err);
2998 	}
2999 
3000 	iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
3001 	if (IS_ERR(iwmr)) {
3002 		err = PTR_ERR(iwmr);
3003 		goto err_release;
3004 	}
3005 
3006 	err = irdma_reg_user_mr_type_mem(iwmr, access);
3007 	if (err)
3008 		goto err_iwmr;
3009 
3010 	return &iwmr->ibmr;
3011 
3012 err_iwmr:
3013 	irdma_free_iwmr(iwmr);
3014 
3015 err_release:
3016 	ib_umem_release(&umem_dmabuf->umem);
3017 
3018 	return ERR_PTR(err);
3019 }
3020 
3021 /**
3022  * irdma_reg_phys_mr - register kernel physical memory
3023  * @pd: ibpd pointer
3024  * @addr: physical address of memory to register
3025  * @size: size of memory to register
3026  * @access: Access rights
3027  * @iova_start: start of virtual address for physical buffers
3028  */
3029 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
3030 				u64 *iova_start)
3031 {
3032 	struct irdma_device *iwdev = to_iwdev(pd->device);
3033 	struct irdma_pbl *iwpbl;
3034 	struct irdma_mr *iwmr;
3035 	u32 stag;
3036 	int ret;
3037 
3038 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
3039 	if (!iwmr)
3040 		return ERR_PTR(-ENOMEM);
3041 
3042 	iwmr->ibmr.pd = pd;
3043 	iwmr->ibmr.device = pd->device;
3044 	iwpbl = &iwmr->iwpbl;
3045 	iwpbl->iwmr = iwmr;
3046 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
3047 	iwpbl->user_base = *iova_start;
3048 	stag = irdma_create_stag(iwdev);
3049 	if (!stag) {
3050 		ret = -ENOMEM;
3051 		goto err;
3052 	}
3053 
3054 	iwmr->stag = stag;
3055 	iwmr->ibmr.iova = *iova_start;
3056 	iwmr->ibmr.rkey = stag;
3057 	iwmr->ibmr.lkey = stag;
3058 	iwmr->page_cnt = 1;
3059 	iwmr->pgaddrmem[0] = addr;
3060 	iwmr->len = size;
3061 	iwmr->page_size = SZ_4K;
3062 	ret = irdma_hwreg_mr(iwdev, iwmr, access);
3063 	if (ret) {
3064 		irdma_free_stag(iwdev, stag);
3065 		goto err;
3066 	}
3067 
3068 	return &iwmr->ibmr;
3069 
3070 err:
3071 	kfree(iwmr);
3072 
3073 	return ERR_PTR(ret);
3074 }
3075 
3076 /**
3077  * irdma_get_dma_mr - register physical mem
3078  * @pd: ptr of pd
3079  * @acc: access for memory
3080  */
3081 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
3082 {
3083 	u64 kva = 0;
3084 
3085 	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
3086 }
3087 
3088 /**
3089  * irdma_del_memlist - delete pbl list entries for CQ/QP
3090  * @iwmr: iwmr for IB's user page addresses
3091  * @ucontext: ptr to user context
3092  */
3093 static void irdma_del_memlist(struct irdma_mr *iwmr,
3094 			      struct irdma_ucontext *ucontext)
3095 {
3096 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3097 	unsigned long flags;
3098 
3099 	switch (iwmr->type) {
3100 	case IRDMA_MEMREG_TYPE_CQ:
3101 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3102 		if (iwpbl->on_list) {
3103 			iwpbl->on_list = false;
3104 			list_del(&iwpbl->list);
3105 		}
3106 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3107 		break;
3108 	case IRDMA_MEMREG_TYPE_QP:
3109 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
3110 		if (iwpbl->on_list) {
3111 			iwpbl->on_list = false;
3112 			list_del(&iwpbl->list);
3113 		}
3114 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
3115 		break;
3116 	default:
3117 		break;
3118 	}
3119 }
3120 
3121 /**
3122  * irdma_dereg_mr - deregister mr
3123  * @ib_mr: mr ptr for dereg
3124  * @udata: user data
3125  */
3126 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3127 {
3128 	struct ib_pd *ibpd = ib_mr->pd;
3129 	struct irdma_pd *iwpd = to_iwpd(ibpd);
3130 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
3131 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3132 	struct irdma_dealloc_stag_info *info;
3133 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3134 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3135 	struct irdma_cqp_request *cqp_request;
3136 	struct cqp_cmds_info *cqp_info;
3137 	int status;
3138 
3139 	if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3140 		if (iwmr->region) {
3141 			struct irdma_ucontext *ucontext;
3142 
3143 			ucontext = rdma_udata_to_drv_context(udata,
3144 						struct irdma_ucontext,
3145 						ibucontext);
3146 			irdma_del_memlist(iwmr, ucontext);
3147 		}
3148 		goto done;
3149 	}
3150 
3151 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3152 	if (!cqp_request)
3153 		return -ENOMEM;
3154 
3155 	cqp_info = &cqp_request->info;
3156 	info = &cqp_info->in.u.dealloc_stag.info;
3157 	memset(info, 0, sizeof(*info));
3158 	info->pd_id = iwpd->sc_pd.pd_id;
3159 	info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3160 	info->mr = true;
3161 	if (iwpbl->pbl_allocated)
3162 		info->dealloc_pbl = true;
3163 
3164 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3165 	cqp_info->post_sq = 1;
3166 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3167 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3168 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3169 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3170 	if (status)
3171 		return status;
3172 
3173 	irdma_free_stag(iwdev, iwmr->stag);
3174 done:
3175 	if (iwpbl->pbl_allocated)
3176 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
3177 	ib_umem_release(iwmr->region);
3178 	kfree(iwmr);
3179 
3180 	return 0;
3181 }
3182 
3183 /**
3184  * irdma_post_send - post send wr for kernel application
3185  * @ibqp: qp ptr for wr
3186  * @ib_wr: work request ptr
3187  * @bad_wr: return of bad wr if err
3188  */
3189 static int irdma_post_send(struct ib_qp *ibqp,
3190 			   const struct ib_send_wr *ib_wr,
3191 			   const struct ib_send_wr **bad_wr)
3192 {
3193 	struct irdma_qp *iwqp;
3194 	struct irdma_qp_uk *ukqp;
3195 	struct irdma_sc_dev *dev;
3196 	struct irdma_post_sq_info info;
3197 	int err = 0;
3198 	unsigned long flags;
3199 	bool inv_stag;
3200 	struct irdma_ah *ah;
3201 
3202 	iwqp = to_iwqp(ibqp);
3203 	ukqp = &iwqp->sc_qp.qp_uk;
3204 	dev = &iwqp->iwdev->rf->sc_dev;
3205 
3206 	spin_lock_irqsave(&iwqp->lock, flags);
3207 	while (ib_wr) {
3208 		memset(&info, 0, sizeof(info));
3209 		inv_stag = false;
3210 		info.wr_id = (ib_wr->wr_id);
3211 		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3212 			info.signaled = true;
3213 		if (ib_wr->send_flags & IB_SEND_FENCE)
3214 			info.read_fence = true;
3215 		switch (ib_wr->opcode) {
3216 		case IB_WR_SEND_WITH_IMM:
3217 			if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
3218 				info.imm_data_valid = true;
3219 				info.imm_data = ntohl(ib_wr->ex.imm_data);
3220 			} else {
3221 				err = -EINVAL;
3222 				break;
3223 			}
3224 			fallthrough;
3225 		case IB_WR_SEND:
3226 		case IB_WR_SEND_WITH_INV:
3227 			if (ib_wr->opcode == IB_WR_SEND ||
3228 			    ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
3229 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
3230 					info.op_type = IRDMA_OP_TYPE_SEND_SOL;
3231 				else
3232 					info.op_type = IRDMA_OP_TYPE_SEND;
3233 			} else {
3234 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
3235 					info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
3236 				else
3237 					info.op_type = IRDMA_OP_TYPE_SEND_INV;
3238 				info.stag_to_inv = ib_wr->ex.invalidate_rkey;
3239 			}
3240 
3241 			info.op.send.num_sges = ib_wr->num_sge;
3242 			info.op.send.sg_list = ib_wr->sg_list;
3243 			if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3244 			    iwqp->ibqp.qp_type == IB_QPT_GSI) {
3245 				ah = to_iwah(ud_wr(ib_wr)->ah);
3246 				info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
3247 				info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
3248 				info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3249 			}
3250 
3251 			if (ib_wr->send_flags & IB_SEND_INLINE)
3252 				err = irdma_uk_inline_send(ukqp, &info, false);
3253 			else
3254 				err = irdma_uk_send(ukqp, &info, false);
3255 			break;
3256 		case IB_WR_RDMA_WRITE_WITH_IMM:
3257 			if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
3258 				info.imm_data_valid = true;
3259 				info.imm_data = ntohl(ib_wr->ex.imm_data);
3260 			} else {
3261 				err = -EINVAL;
3262 				break;
3263 			}
3264 			fallthrough;
3265 		case IB_WR_RDMA_WRITE:
3266 			if (ib_wr->send_flags & IB_SEND_SOLICITED)
3267 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
3268 			else
3269 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
3270 
3271 			info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
3272 			info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
3273 			info.op.rdma_write.rem_addr.addr =
3274 				rdma_wr(ib_wr)->remote_addr;
3275 			info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3276 			if (ib_wr->send_flags & IB_SEND_INLINE)
3277 				err = irdma_uk_inline_rdma_write(ukqp, &info, false);
3278 			else
3279 				err = irdma_uk_rdma_write(ukqp, &info, false);
3280 			break;
3281 		case IB_WR_RDMA_READ_WITH_INV:
3282 			inv_stag = true;
3283 			fallthrough;
3284 		case IB_WR_RDMA_READ:
3285 			if (ib_wr->num_sge >
3286 			    dev->hw_attrs.uk_attrs.max_hw_read_sges) {
3287 				err = -EINVAL;
3288 				break;
3289 			}
3290 			info.op_type = IRDMA_OP_TYPE_RDMA_READ;
3291 			info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
3292 			info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3293 			info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
3294 			info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
3295 			err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
3296 			break;
3297 		case IB_WR_LOCAL_INV:
3298 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
3299 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
3300 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
3301 			break;
3302 		case IB_WR_REG_MR: {
3303 			struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
3304 			struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
3305 			struct irdma_fast_reg_stag_info stag_info = {};
3306 
3307 			stag_info.signaled = info.signaled;
3308 			stag_info.read_fence = info.read_fence;
3309 			stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
3310 			stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
3311 			stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
3312 			stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
3313 			stag_info.wr_id = ib_wr->wr_id;
3314 			stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3315 			stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
3316 			stag_info.total_len = iwmr->ibmr.length;
3317 			stag_info.reg_addr_pa = *palloc->level1.addr;
3318 			stag_info.first_pm_pbl_index = palloc->level1.idx;
3319 			stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
3320 			if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
3321 				stag_info.chunk_size = 1;
3322 			err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
3323 							true);
3324 			break;
3325 		}
3326 		default:
3327 			err = -EINVAL;
3328 			ibdev_dbg(&iwqp->iwdev->ibdev,
3329 				  "VERBS: upost_send bad opcode = 0x%x\n",
3330 				  ib_wr->opcode);
3331 			break;
3332 		}
3333 
3334 		if (err)
3335 			break;
3336 		ib_wr = ib_wr->next;
3337 	}
3338 
3339 	if (!iwqp->flush_issued) {
3340 		if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
3341 			irdma_uk_qp_post_wr(ukqp);
3342 		spin_unlock_irqrestore(&iwqp->lock, flags);
3343 	} else {
3344 		spin_unlock_irqrestore(&iwqp->lock, flags);
3345 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3346 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3347 	}
3348 	if (err)
3349 		*bad_wr = ib_wr;
3350 
3351 	return err;
3352 }
3353 
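/*
 * A minimal sketch (illustrative, not part of the driver) of how a kernel
 * consumer hands an RDMA WRITE to irdma_post_send() via the core verbs
 * layer; qp, dma_addr, len, mr, remote_va and remote_rkey are assumed to
 * already exist:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = mr->lkey,
 *	};
 *	struct ib_rdma_wr wr = {
 *		.wr = { .opcode = IB_WR_RDMA_WRITE, .sg_list = &sge,
 *			.num_sge = 1, .send_flags = IB_SEND_SIGNALED },
 *		.remote_addr = remote_va, .rkey = remote_rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */
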
3354 /**
3355  * irdma_post_recv - post receive wr for kernel application
3356  * @ibqp: ib qp pointer
3357  * @ib_wr: work request for receive
3358  * @bad_wr: bad wr caused an error
3359  */
3360 static int irdma_post_recv(struct ib_qp *ibqp,
3361 			   const struct ib_recv_wr *ib_wr,
3362 			   const struct ib_recv_wr **bad_wr)
3363 {
3364 	struct irdma_qp *iwqp;
3365 	struct irdma_qp_uk *ukqp;
3366 	struct irdma_post_rq_info post_recv = {};
3367 	unsigned long flags;
3368 	int err = 0;
3369 
3370 	iwqp = to_iwqp(ibqp);
3371 	ukqp = &iwqp->sc_qp.qp_uk;
3372 
3373 	spin_lock_irqsave(&iwqp->lock, flags);
3374 	while (ib_wr) {
3375 		post_recv.num_sges = ib_wr->num_sge;
3376 		post_recv.wr_id = ib_wr->wr_id;
3377 		post_recv.sg_list = ib_wr->sg_list;
3378 		err = irdma_uk_post_receive(ukqp, &post_recv);
3379 		if (err) {
3380 			ibdev_dbg(&iwqp->iwdev->ibdev,
3381 				  "VERBS: post_recv err %d\n", err);
3382 			goto out;
3383 		}
3384 
3385 		ib_wr = ib_wr->next;
3386 	}
3387 
3388 out:
3389 	spin_unlock_irqrestore(&iwqp->lock, flags);
3390 	if (iwqp->flush_issued)
3391 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3392 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3393 
3394 	if (err)
3395 		*bad_wr = ib_wr;
3396 
3397 	return err;
3398 }
3399 
3400 /**
3401  * irdma_flush_err_to_ib_wc_status - convert flush error code to IB WC status
3402  * @opcode: iwarp flush code
3403  */
3404 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
3405 {
3406 	switch (opcode) {
3407 	case FLUSH_PROT_ERR:
3408 		return IB_WC_LOC_PROT_ERR;
3409 	case FLUSH_REM_ACCESS_ERR:
3410 		return IB_WC_REM_ACCESS_ERR;
3411 	case FLUSH_LOC_QP_OP_ERR:
3412 		return IB_WC_LOC_QP_OP_ERR;
3413 	case FLUSH_REM_OP_ERR:
3414 		return IB_WC_REM_OP_ERR;
3415 	case FLUSH_LOC_LEN_ERR:
3416 		return IB_WC_LOC_LEN_ERR;
3417 	case FLUSH_GENERAL_ERR:
3418 		return IB_WC_WR_FLUSH_ERR;
3419 	case FLUSH_RETRY_EXC_ERR:
3420 		return IB_WC_RETRY_EXC_ERR;
3421 	case FLUSH_MW_BIND_ERR:
3422 		return IB_WC_MW_BIND_ERR;
3423 	case FLUSH_REM_INV_REQ_ERR:
3424 		return IB_WC_REM_INV_REQ_ERR;
3425 	case FLUSH_FATAL_ERR:
3426 	default:
3427 		return IB_WC_FATAL_ERR;
3428 	}
3429 }
3430 
3431 /**
3432  * irdma_process_cqe - process cqe info
3433  * @entry: processed cqe
3434  * @cq_poll_info: cqe info
3435  */
3436 static void irdma_process_cqe(struct ib_wc *entry,
3437 			      struct irdma_cq_poll_info *cq_poll_info)
3438 {
3439 	struct irdma_sc_qp *qp;
3440 
3441 	entry->wc_flags = 0;
3442 	entry->pkey_index = 0;
3443 	entry->wr_id = cq_poll_info->wr_id;
3444 
3445 	qp = cq_poll_info->qp_handle;
3446 	entry->qp = qp->qp_uk.back_qp;
3447 
3448 	if (cq_poll_info->error) {
3449 		entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
3450 				irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
3451 
3452 		entry->vendor_err = cq_poll_info->major_err << 16 |
3453 				    cq_poll_info->minor_err;
3454 	} else {
3455 		entry->status = IB_WC_SUCCESS;
3456 		if (cq_poll_info->imm_valid) {
3457 			entry->ex.imm_data = htonl(cq_poll_info->imm_data);
3458 			entry->wc_flags |= IB_WC_WITH_IMM;
3459 		}
3460 		if (cq_poll_info->ud_smac_valid) {
3461 			ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
3462 			entry->wc_flags |= IB_WC_WITH_SMAC;
3463 		}
3464 
3465 		if (cq_poll_info->ud_vlan_valid) {
3466 			u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
3467 
3468 			entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
3469 			if (vlan) {
3470 				entry->vlan_id = vlan;
3471 				entry->wc_flags |= IB_WC_WITH_VLAN;
3472 			}
3473 		} else {
3474 			entry->sl = 0;
3475 		}
3476 	}
3477 
3478 	if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
3479 		set_ib_wc_op_sq(cq_poll_info, entry);
3480 	} else {
3481 		set_ib_wc_op_rq(cq_poll_info, entry,
3482 				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
3483 				true : false);
3484 		if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
3485 		    cq_poll_info->stag_invalid_set) {
3486 			entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
3487 			entry->wc_flags |= IB_WC_WITH_INVALIDATE;
3488 		}
3489 	}
3490 
3491 	if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
3492 		entry->src_qp = cq_poll_info->ud_src_qpn;
3493 		entry->slid = 0;
3494 		entry->wc_flags |=
3495 			(IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
3496 		entry->network_hdr_type = cq_poll_info->ipv4 ?
3497 						  RDMA_NETWORK_IPV4 :
3498 						  RDMA_NETWORK_IPV6;
3499 	} else {
3500 		entry->src_qp = cq_poll_info->qp_id;
3501 	}
3502 
3503 	entry->byte_len = cq_poll_info->bytes_xfered;
3504 }
3505 
3506 /**
3507  * irdma_poll_one - poll one entry of the CQ
3508  * @ukcq: ukcq to poll
3509  * @cur_cqe: current CQE info to be filled in
3510  * @entry: ib_wc object to be filled for non-extended CQ, or NULL for extended CQ
3511  *
3512  * Returns the internal irdma device error code or 0 on success
3513  */
3514 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
3515 				 struct irdma_cq_poll_info *cur_cqe,
3516 				 struct ib_wc *entry)
3517 {
3518 	int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3519 
3520 	if (ret)
3521 		return ret;
3522 
3523 	irdma_process_cqe(entry, cur_cqe);
3524 
3525 	return 0;
3526 }
3527 
3528 /**
3529  * __irdma_poll_cq - poll cq for completion (kernel apps)
3530  * @iwcq: cq to poll
3531  * @num_entries: number of entries to poll
3532  * @entry: wr of a completed entry
3533  */
3534 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3535 {
3536 	struct list_head *tmp_node, *list_node;
3537 	struct irdma_cq_buf *last_buf = NULL;
3538 	struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3539 	struct irdma_cq_buf *cq_buf;
3540 	int ret;
3541 	struct irdma_device *iwdev;
3542 	struct irdma_cq_uk *ukcq;
3543 	bool cq_new_cqe = false;
3544 	int resized_bufs = 0;
3545 	int npolled = 0;
3546 
3547 	iwdev = to_iwdev(iwcq->ibcq.device);
3548 	ukcq = &iwcq->sc_cq.cq_uk;
3549 
3550 	/* go through the list of previously resized CQ buffers */
3551 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3552 		cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3553 		while (npolled < num_entries) {
3554 			ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3555 			if (!ret) {
3556 				++npolled;
3557 				cq_new_cqe = true;
3558 				continue;
3559 			}
3560 			if (ret == -ENOENT)
3561 				break;
3562 			/* QP using the CQ is destroyed. Skip reporting this CQE */
3563 			if (ret == -EFAULT) {
3564 				cq_new_cqe = true;
3565 				continue;
3566 			}
3567 			goto error;
3568 		}
3569 
3570 		/* save the resized CQ buffer which received the last cqe */
3571 		if (cq_new_cqe)
3572 			last_buf = cq_buf;
3573 		cq_new_cqe = false;
3574 	}
3575 
3576 	/* check the current CQ for new cqes */
3577 	while (npolled < num_entries) {
3578 		ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3579 		if (ret == -ENOENT) {
3580 			ret = irdma_generated_cmpls(iwcq, cur_cqe);
3581 			if (!ret)
3582 				irdma_process_cqe(entry + npolled, cur_cqe);
3583 		}
3584 		if (!ret) {
3585 			++npolled;
3586 			cq_new_cqe = true;
3587 			continue;
3588 		}
3589 
3590 		if (ret == -ENOENT)
3591 			break;
3592 		/* QP using the CQ is destroyed. Skip reporting this CQE */
3593 		if (ret == -EFAULT) {
3594 			cq_new_cqe = true;
3595 			continue;
3596 		}
3597 		goto error;
3598 	}
3599 
3600 	if (cq_new_cqe)
3601 		/* all previous CQ resizes are complete */
3602 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3603 	else if (last_buf)
3604 		/* only CQ resizes up to the last_buf are complete */
3605 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3606 	if (resized_bufs)
3607 		/* report to the HW the number of complete CQ resizes */
3608 		irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3609 
3610 	return npolled;
3611 error:
3612 	ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
3613 		  __func__, ret);
3614 
3615 	return ret;
3616 }
3617 
3618 /**
3619  * irdma_poll_cq - poll cq for completion (kernel apps)
3620  * @ibcq: cq to poll
3621  * @num_entries: number of entries to poll
3622  * @entry: wr of a completed entry
3623  */
3624 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3625 			 struct ib_wc *entry)
3626 {
3627 	struct irdma_cq *iwcq;
3628 	unsigned long flags;
3629 	int ret;
3630 
3631 	iwcq = to_iwcq(ibcq);
3632 
3633 	spin_lock_irqsave(&iwcq->lock, flags);
3634 	ret = __irdma_poll_cq(iwcq, num_entries, entry);
3635 	spin_unlock_irqrestore(&iwcq->lock, flags);
3636 
3637 	return ret;
3638 }
3639 
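/*
 * Usage sketch (illustrative assumption, not part of the driver): kernel
 * consumers reach this entry point through ib_poll_cq() and typically drain
 * completions in batches; the cq pointer and WC array here are hypothetical.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				pr_debug("wr_id %llu status %d\n",
 *					 wc[i].wr_id, wc[i].status);
 *		}
 *	}
 */
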
3640 /**
3641  * irdma_req_notify_cq - arm the CQ (kernel applications)
3642  * @ibcq: cq to arm
3643  * @notify_flags: notification flags
3644  */
3645 static int irdma_req_notify_cq(struct ib_cq *ibcq,
3646 			       enum ib_cq_notify_flags notify_flags)
3647 {
3648 	struct irdma_cq *iwcq;
3649 	struct irdma_cq_uk *ukcq;
3650 	unsigned long flags;
3651 	enum irdma_cmpl_notify cq_notify;
3652 	bool promo_event = false;
3653 	int ret = 0;
3654 
3655 	cq_notify = notify_flags == IB_CQ_SOLICITED ?
3656 		    IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
3657 	iwcq = to_iwcq(ibcq);
3658 	ukcq = &iwcq->sc_cq.cq_uk;
3659 
3660 	spin_lock_irqsave(&iwcq->lock, flags);
3661 	/* Only promote to arm the CQ for any event if the last arm event was solicited. */
3662 	if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
3663 		promo_event = true;
3664 
3665 	if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
3666 		iwcq->last_notify = cq_notify;
3667 		irdma_uk_cq_request_notification(ukcq, cq_notify);
3668 	}
3669 
3670 	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3671 	    (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
3672 		ret = 1;
3673 	spin_unlock_irqrestore(&iwcq->lock, flags);
3674 
3675 	return ret;
3676 }
3677 
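/*
 * Usage sketch (illustrative assumption): a completion can slip in between
 * the consumer's last poll and the arm, so callers usually pass
 * IB_CQ_REPORT_MISSED_EVENTS and re-poll when this verb returns 1:
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto repoll;
 */
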
3678 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
3679 				     struct ib_port_immutable *immutable)
3680 {
3681 	struct ib_port_attr attr;
3682 	int err;
3683 
3684 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3685 	err = ib_query_port(ibdev, port_num, &attr);
3686 	if (err)
3687 		return err;
3688 
3689 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3690 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
3691 	immutable->gid_tbl_len = attr.gid_tbl_len;
3692 
3693 	return 0;
3694 }
3695 
3696 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
3697 				   struct ib_port_immutable *immutable)
3698 {
3699 	struct ib_port_attr attr;
3700 	int err;
3701 
3702 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
3703 	err = ib_query_port(ibdev, port_num, &attr);
3704 	if (err)
3705 		return err;
3706 	immutable->gid_tbl_len = attr.gid_tbl_len;
3707 
3708 	return 0;
3709 }
3710 
3711 static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
3712 	/* 32bit names */
3713 	[IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
3714 	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
3715 	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
3716 	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
3717 	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
3718 	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
3719 	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
3720 	[IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
3721 	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
3722 	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
3723 	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
3724 	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
3725 	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
3726 
3727 	/* 64bit names */
3728 	[IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3729 		"ip4InOctets",
3730 	[IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3731 		"ip4InPkts",
3732 	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3733 		"ip4InReasmRqd",
3734 	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3735 		"ip4InMcastOctets",
3736 	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3737 		"ip4InMcastPkts",
3738 	[IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3739 		"ip4OutOctets",
3740 	[IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3741 		"ip4OutPkts",
3742 	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3743 		"ip4OutSegRqd",
3744 	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3745 		"ip4OutMcastOctets",
3746 	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3747 		"ip4OutMcastPkts",
3748 	[IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3749 		"ip6InOctets",
3750 	[IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3751 		"ip6InPkts",
3752 	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3753 		"ip6InReasmRqd",
3754 	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3755 		"ip6InMcastOctets",
3756 	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3757 		"ip6InMcastPkts",
3758 	[IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3759 		"ip6OutOctets",
3760 	[IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3761 		"ip6OutPkts",
3762 	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3763 		"ip6OutSegRqd",
3764 	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3765 		"ip6OutMcastOctets",
3766 	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3767 		"ip6OutMcastPkts",
3768 	[IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3769 		"tcpInSegs",
3770 	[IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32].name =
3771 		"tcpOutSegs",
3772 	[IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3773 		"iwInRdmaReads",
3774 	[IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3775 		"iwInRdmaSends",
3776 	[IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3777 		"iwInRdmaWrites",
3778 	[IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3779 		"iwOutRdmaReads",
3780 	[IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3781 		"iwOutRdmaSends",
3782 	[IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3783 		"iwOutRdmaWrites",
3784 	[IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32].name =
3785 		"iwRdmaBnd",
3786 	[IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32].name =
3787 		"iwRdmaInv",
3788 	[IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3789 		"RxUDP",
3790 	[IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3791 		"TxUDP",
3792 	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32]
3793 		.name = "RxECNMrkd",
3794 };
3795 
3796 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
3797 {
3798 	struct irdma_device *iwdev = to_iwdev(dev);
3799 
3800 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
3801 		 irdma_fw_major_ver(&iwdev->rf->sc_dev),
3802 		 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
3803 }
3804 
3805 /**
3806  * irdma_alloc_hw_port_stats - Allocate a hw stats structure
3807  * @ibdev: device pointer from stack
3808  * @port_num: port number
3809  */
3810 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
3811 						       u32 port_num)
3812 {
3813 	int num_counters = IRDMA_HW_STAT_INDEX_MAX_32 +
3814 			   IRDMA_HW_STAT_INDEX_MAX_64;
3815 	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
3816 
3817 	BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_descs) !=
3818 		     (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
3819 
3820 	return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
3821 					  lifespan);
3822 }
3823 
3824 /**
3825  * irdma_get_hw_stats - Populates the rdma_hw_stats structure
3826  * @ibdev: device pointer from stack
3827  * @stats: stats pointer from stack
3828  * @port_num: port number
3829  * @index: which hw counter the stack is requesting we update
3830  */
3831 static int irdma_get_hw_stats(struct ib_device *ibdev,
3832 			      struct rdma_hw_stats *stats, u32 port_num,
3833 			      int index)
3834 {
3835 	struct irdma_device *iwdev = to_iwdev(ibdev);
3836 	struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
3837 
3838 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
3839 		irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
3840 	else
3841 		irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
3842 
3843 	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
3844 
3845 	return stats->num_counters;
3846 }
3847 
3848 /**
3849  * irdma_query_gid - Query port GID
3850  * @ibdev: device pointer from stack
3851  * @port: port number
3852  * @index: Entry index
3853  * @gid: Global ID
3854  */
3855 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
3856 			   union ib_gid *gid)
3857 {
3858 	struct irdma_device *iwdev = to_iwdev(ibdev);
3859 
3860 	memset(gid->raw, 0, sizeof(gid->raw));
3861 	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
3862 
3863 	return 0;
3864 }
3865 
3866 /**
3867  * mcast_list_add - Add a new mcast item to list
3868  * @rf: RDMA PCI function
3869  * @new_elem: pointer to element to add
3870  */
3871 static void mcast_list_add(struct irdma_pci_f *rf,
3872 			   struct mc_table_list *new_elem)
3873 {
3874 	list_add(&new_elem->list, &rf->mc_qht_list.list);
3875 }
3876 
3877 /**
3878  * mcast_list_del - Remove an mcast item from list
3879  * @mc_qht_elem: pointer to mcast table list element
3880  */
3881 static void mcast_list_del(struct mc_table_list *mc_qht_elem)
3882 {
3883 	if (mc_qht_elem)
3884 		list_del(&mc_qht_elem->list);
3885 }
3886 
3887 /**
3888  * mcast_list_lookup_ip - Search mcast list for address
3889  * @rf: RDMA PCI function
3890  * @ip_mcast: pointer to mcast IP address
3891  */
3892 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
3893 						  u32 *ip_mcast)
3894 {
3895 	struct mc_table_list *mc_qht_el;
3896 	struct list_head *pos, *q;
3897 
3898 	list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
3899 		mc_qht_el = list_entry(pos, struct mc_table_list, list);
3900 		if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3901 			    sizeof(mc_qht_el->mc_info.dest_ip)))
3902 			return mc_qht_el;
3903 	}
3904 
3905 	return NULL;
3906 }
3907 
3908 /**
3909  * irdma_mcast_cqp_op - perform a mcast cqp operation
3910  * @iwdev: irdma device
3911  * @mc_grp_ctx: mcast group info
3912  * @op: operation
3913  *
3914  * returns error status
3915  */
3916 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
3917 			      struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3918 {
3919 	struct cqp_cmds_info *cqp_info;
3920 	struct irdma_cqp_request *cqp_request;
3921 	int status;
3922 
3923 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3924 	if (!cqp_request)
3925 		return -ENOMEM;
3926 
3927 	cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3928 	cqp_info = &cqp_request->info;
3929 	cqp_info->cqp_cmd = op;
3930 	cqp_info->post_sq = 1;
3931 	cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3932 	cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3933 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3934 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3935 
3936 	return status;
3937 }
3938 
3939 /**
3940  * irdma_mcast_mac - Get the multicast MAC for an IP address
3941  * @ip_addr: IPv4 or IPv6 address
3942  * @mac: pointer to result MAC address
3943  * @ipv4: flag indicating IPv4 or IPv6
3944  *
3945  */
3946 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
3947 {
3948 	u8 *ip = (u8 *)ip_addr;
3949 
3950 	if (ipv4) {
3951 		unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
3952 						0x00, 0x00};
3953 
3954 		mac4[3] = ip[2] & 0x7F;
3955 		mac4[4] = ip[1];
3956 		mac4[5] = ip[0];
3957 		ether_addr_copy(mac, mac4);
3958 	} else {
3959 		unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
3960 						0x00, 0x00};
3961 
3962 		mac6[2] = ip[3];
3963 		mac6[3] = ip[2];
3964 		mac6[4] = ip[1];
3965 		mac6[5] = ip[0];
3966 		ether_addr_copy(mac, mac6);
3967 	}
3968 }
3969 
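/*
 * Worked example (illustrative only; assumes a little-endian host, matching
 * how irdma_attach_mcast() passes the group address in host byte order):
 *
 *	u32 grp = 0xef010203;		(239.1.2.3)
 *	u8 dmac[ETH_ALEN];
 *
 *	irdma_mcast_mac(&grp, dmac, true);
 *
 * dmac is now 01:00:5e:01:02:03, i.e. the low 23 bits of the group folded
 * under the 01:00:5e prefix (RFC 1112). The IPv6 branch places address
 * octets under the 33:33 multicast prefix instead.
 */
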
3970 /**
3971  * irdma_attach_mcast - attach a qp to a multicast group
3972  * @ibqp: ptr to qp
3973  * @ibgid: pointer to global ID
3974  * @lid: local ID
3975  *
3976  * returns error status
3977  */
3978 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3979 {
3980 	struct irdma_qp *iwqp = to_iwqp(ibqp);
3981 	struct irdma_device *iwdev = iwqp->iwdev;
3982 	struct irdma_pci_f *rf = iwdev->rf;
3983 	struct mc_table_list *mc_qht_elem;
3984 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
3985 	unsigned long flags;
3986 	u32 ip_addr[4] = {};
3987 	u32 mgn;
3988 	u32 no_mgs;
3989 	int ret = 0;
3990 	bool ipv4;
3991 	u16 vlan_id;
3992 	union irdma_sockaddr sgid_addr;
3993 	unsigned char dmac[ETH_ALEN];
3994 
3995 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3996 
3997 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
3998 		irdma_copy_ip_ntohl(ip_addr,
3999 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4000 		irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
4001 		ipv4 = false;
4002 		ibdev_dbg(&iwdev->ibdev,
4003 			  "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
4004 			  ip_addr);
4005 		irdma_mcast_mac(ip_addr, dmac, false);
4006 	} else {
4007 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4008 		ipv4 = true;
4009 		vlan_id = irdma_get_vlan_ipv4(ip_addr);
4010 		irdma_mcast_mac(ip_addr, dmac, true);
4011 		ibdev_dbg(&iwdev->ibdev,
4012 			  "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
4013 			  ibqp->qp_num, ip_addr, dmac);
4014 	}
4015 
4016 	spin_lock_irqsave(&rf->qh_list_lock, flags);
4017 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4018 	if (!mc_qht_elem) {
4019 		struct irdma_dma_mem *dma_mem_mc;
4020 
4021 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4022 		mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
4023 		if (!mc_qht_elem)
4024 			return -ENOMEM;
4025 
4026 		mc_qht_elem->mc_info.ipv4_valid = ipv4;
4027 		memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
4028 		       sizeof(mc_qht_elem->mc_info.dest_ip));
4029 		ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
4030 				       &mgn, &rf->next_mcg);
4031 		if (ret) {
4032 			kfree(mc_qht_elem);
4033 			return -ENOMEM;
4034 		}
4035 
4036 		mc_qht_elem->mc_info.mgn = mgn;
4037 		dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
4038 		dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
4039 					 IRDMA_HW_PAGE_SIZE);
4040 		dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
4041 						    dma_mem_mc->size,
4042 						    &dma_mem_mc->pa,
4043 						    GFP_KERNEL);
4044 		if (!dma_mem_mc->va) {
4045 			irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
4046 			kfree(mc_qht_elem);
4047 			return -ENOMEM;
4048 		}
4049 
4050 		mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
4051 		memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
4052 		       sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
4053 		mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
4054 		mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
4055 		if (vlan_id < VLAN_N_VID)
4056 			mc_qht_elem->mc_grp_ctx.vlan_valid = true;
4057 		mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;
4058 		mc_qht_elem->mc_grp_ctx.qs_handle =
4059 			iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4060 		ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
4061 
4062 		spin_lock_irqsave(&rf->qh_list_lock, flags);
4063 		mcast_list_add(rf, mc_qht_elem);
4064 	} else {
4065 		if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
4066 		    IRDMA_MAX_MGS_PER_CTX) {
4067 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4068 			return -ENOMEM;
4069 		}
4070 	}
4071 
4072 	mcg_info.qp_id = iwqp->ibqp.qp_num;
4073 	no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
4074 	irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4075 	spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4076 
4077 	/* Only if there is a change do we need to modify or create */
4078 	if (!no_mgs) {
4079 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4080 					 IRDMA_OP_MC_CREATE);
4081 	} else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4082 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4083 					 IRDMA_OP_MC_MODIFY);
4084 	} else {
4085 		return 0;
4086 	}
4087 
4088 	if (ret)
4089 		goto error;
4090 
4091 	return 0;
4092 
4093 error:
4094 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4095 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4096 		mcast_list_del(mc_qht_elem);
4097 		dma_free_coherent(rf->hw.device,
4098 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4099 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4100 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4101 		mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4102 		irdma_free_rsrc(rf, rf->allocated_mcgs,
4103 				mc_qht_elem->mc_grp_ctx.mg_id);
4104 		kfree(mc_qht_elem);
4105 	}
4106 
4107 	return ret;
4108 }
4109 
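/*
 * Usage sketch (illustrative assumption, not part of the driver): a RoCE UD
 * consumer reaches this verb through ib_attach_mcast() with the multicast
 * GID and LID 0, and detaches with the mirror call; qp and mgid below are
 * hypothetical.
 *
 *	ret = ib_attach_mcast(qp, &mgid, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, 0);
 */
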
4110 /**
4111  * irdma_detach_mcast - detach a qp from a multicast group
4112  * @ibqp: ptr to qp
4113  * @ibgid: pointer to global ID
4114  * @lid: local ID
4115  *
4116  * returns error status
4117  */
4118 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4119 {
4120 	struct irdma_qp *iwqp = to_iwqp(ibqp);
4121 	struct irdma_device *iwdev = iwqp->iwdev;
4122 	struct irdma_pci_f *rf = iwdev->rf;
4123 	u32 ip_addr[4] = {};
4124 	struct mc_table_list *mc_qht_elem;
4125 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4126 	int ret;
4127 	unsigned long flags;
4128 	union irdma_sockaddr sgid_addr;
4129 
4130 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4131 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
4132 		irdma_copy_ip_ntohl(ip_addr,
4133 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4134 	else
4135 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4136 
4137 	spin_lock_irqsave(&rf->qh_list_lock, flags);
4138 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4139 	if (!mc_qht_elem) {
4140 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4141 		ibdev_dbg(&iwdev->ibdev,
4142 			  "VERBS: address not found MCG\n");
4143 		return 0;
4144 	}
4145 
4146 	mcg_info.qp_id = iwqp->ibqp.qp_num;
4147 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4148 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4149 		mcast_list_del(mc_qht_elem);
4150 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4151 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4152 					 IRDMA_OP_MC_DESTROY);
4153 		if (ret) {
4154 			ibdev_dbg(&iwdev->ibdev,
4155 				  "VERBS: failed MC_DESTROY MCG\n");
4156 			spin_lock_irqsave(&rf->qh_list_lock, flags);
4157 			mcast_list_add(rf, mc_qht_elem);
4158 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4159 			return -EAGAIN;
4160 		}
4161 
4162 		dma_free_coherent(rf->hw.device,
4163 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4164 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4165 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4166 		mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4167 		irdma_free_rsrc(rf, rf->allocated_mcgs,
4168 				mc_qht_elem->mc_grp_ctx.mg_id);
4169 		kfree(mc_qht_elem);
4170 	} else {
4171 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4172 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4173 					 IRDMA_OP_MC_MODIFY);
4174 		if (ret) {
4175 			ibdev_dbg(&iwdev->ibdev,
4176 				  "VERBS: failed Modify MCG\n");
4177 			return ret;
4178 		}
4179 	}
4180 
4181 	return 0;
4182 }
4183 
4184 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
4185 {
4186 	struct irdma_pci_f *rf = iwdev->rf;
4187 	int err;
4188 
4189 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
4190 			       &rf->next_ah);
4191 	if (err)
4192 		return err;
4193 
4194 	err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
4195 			      irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
4196 
4197 	if (err) {
4198 		ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail\n");
4199 		goto err_ah_create;
4200 	}
4201 
4202 	if (!sleep) {
4203 		int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
4204 
4205 		do {
4206 			irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
4207 			mdelay(1);
4208 		} while (!ah->sc_ah.ah_info.ah_valid && --cnt);
4209 
4210 		if (!cnt) {
4211 			ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out\n");
4212 			err = -ETIMEDOUT;
4213 			goto err_ah_create;
4214 		}
4215 	}
4216 	return 0;
4217 
4218 err_ah_create:
4219 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
4220 
4221 	return err;
4222 }
4223 
4224 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
4225 {
4226 	struct irdma_pd *pd = to_iwpd(ibah->pd);
4227 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4228 	struct rdma_ah_attr *ah_attr = attr->ah_attr;
4229 	const struct ib_gid_attr *sgid_attr;
4230 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4231 	struct irdma_pci_f *rf = iwdev->rf;
4232 	struct irdma_sc_ah *sc_ah;
4233 	struct irdma_ah_info *ah_info;
4234 	union irdma_sockaddr sgid_addr, dgid_addr;
4235 	int err;
4236 	u8 dmac[ETH_ALEN];
4237 
4238 	ah->pd = pd;
4239 	sc_ah = &ah->sc_ah;
4240 	sc_ah->ah_info.vsi = &iwdev->vsi;
4241 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
4242 	ah->sgid_index = ah_attr->grh.sgid_index;
4243 	sgid_attr = ah_attr->grh.sgid_attr;
4244 	memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
4245 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
4246 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
4247 	ah->av.attrs = *ah_attr;
4248 	ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
4249 	ah_info = &sc_ah->ah_info;
4250 	ah_info->pd_idx = pd->sc_pd.pd_id;
4251 	if (ah_attr->ah_flags & IB_AH_GRH) {
4252 		ah_info->flow_label = ah_attr->grh.flow_label;
4253 		ah_info->hop_ttl = ah_attr->grh.hop_limit;
4254 		ah_info->tc_tos = ah_attr->grh.traffic_class;
4255 	}
4256 
4257 	ether_addr_copy(dmac, ah_attr->roce.dmac);
4258 	if (ah->av.net_type == RDMA_NETWORK_IPV4) {
4259 		ah_info->ipv4_valid = true;
4260 		ah_info->dest_ip_addr[0] =
4261 			ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
4262 		ah_info->src_ip_addr[0] =
4263 			ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4264 		ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
4265 						     ah_info->dest_ip_addr[0]);
4266 		if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
4267 			ah_info->do_lpbk = true;
4268 			irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
4269 		}
4270 	} else {
4271 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
4272 				    dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4273 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
4274 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4275 		ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
4276 						     ah_info->dest_ip_addr);
4277 		if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
4278 			ah_info->do_lpbk = true;
4279 			irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
4280 		}
4281 	}
4282 
4283 	err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
4284 				      ah_info->mac_addr);
4285 	if (err)
4286 		return err;
4287 
4288 	ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
4289 					      ah_info->ipv4_valid, dmac);
4290 
4291 	if (ah_info->dst_arpindex == -1)
4292 		return -EINVAL;
4293 
4294 	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
4295 		ah_info->vlan_tag = 0;
4296 
4297 	if (ah_info->vlan_tag < VLAN_N_VID) {
4298 		ah_info->insert_vlan_tag = true;
4299 		ah_info->vlan_tag |=
4300 			rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
4301 	}
4302 
4303 	return 0;
4304 }
4305 
4306 /**
4307  * irdma_ah_exists - Check for existing identical AH
4308  * @iwdev: irdma device
4309  * @new_ah: AH to check for
4310  *
4311  * returns true if AH is found, false if not found.
4312  */
4313 static bool irdma_ah_exists(struct irdma_device *iwdev,
4314 			    struct irdma_ah *new_ah)
4315 {
4316 	struct irdma_ah *ah;
4317 	u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4318 		  new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4319 		  new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4320 		  new_ah->sc_ah.ah_info.dest_ip_addr[3];
4321 
4322 	hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
4323 		/* Set ah_valid and ah_id the same so memcmp can work */
4324 		new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
4325 		new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
4326 		if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
4327 			    sizeof(ah->sc_ah.ah_info))) {
4328 			refcount_inc(&ah->refcnt);
4329 			new_ah->parent_ah = ah;
4330 			return true;
4331 		}
4332 	}
4333 
4334 	return false;
4335 }
4336 
4337 /**
4338  * irdma_destroy_ah - Destroy address handle
4339  * @ibah: pointer to address handle
4340  * @ah_flags: flags for sleepable
4341  */
4342 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
4343 {
4344 	struct irdma_device *iwdev = to_iwdev(ibah->device);
4345 	struct irdma_ah *ah = to_iwah(ibah);
4346 
4347 	if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
4348 		mutex_lock(&iwdev->ah_tbl_lock);
4349 		if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
4350 			mutex_unlock(&iwdev->ah_tbl_lock);
4351 			return 0;
4352 		}
4353 		hash_del(&ah->parent_ah->list);
4354 		kfree(ah->parent_ah);
4355 		mutex_unlock(&iwdev->ah_tbl_lock);
4356 	}
4357 
4358 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
4359 			false, NULL, ah);
4360 
4361 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
4362 			ah->sc_ah.ah_info.ah_idx);
4363 
4364 	return 0;
4365 }
4366 
4367 /**
4368  * irdma_create_user_ah - create user address handle
4369  * @ibah: address handle
4370  * @attr: address handle attributes
4371  * @udata: User data
4372  *
4373  * returns 0 on success, error otherwise
4374  */
4375 static int irdma_create_user_ah(struct ib_ah *ibah,
4376 				struct rdma_ah_init_attr *attr,
4377 				struct ib_udata *udata)
4378 {
4379 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
4380 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4381 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4382 	struct irdma_create_ah_resp uresp;
4383 	struct irdma_ah *parent_ah;
4384 	int err;
4385 
4386 	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
4387 		return -EINVAL;
4388 
4389 	err = irdma_setup_ah(ibah, attr);
4390 	if (err)
4391 		return err;
4392 	mutex_lock(&iwdev->ah_tbl_lock);
4393 	if (!irdma_ah_exists(iwdev, ah)) {
4394 		err = irdma_create_hw_ah(iwdev, ah, true);
4395 		if (err) {
4396 			mutex_unlock(&iwdev->ah_tbl_lock);
4397 			return err;
4398 		}
4399 		/* Add new AH to list */
4400 		parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
4401 		if (parent_ah) {
4402 			u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4403 				  parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4404 				  parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4405 				  parent_ah->sc_ah.ah_info.dest_ip_addr[3];
4406 
4407 			ah->parent_ah = parent_ah;
4408 			hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
4409 			refcount_set(&parent_ah->refcnt, 1);
4410 		}
4411 	}
4412 	mutex_unlock(&iwdev->ah_tbl_lock);
4413 
4414 	uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
4415 	err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
4416 	if (err)
4417 		irdma_destroy_ah(ibah, attr->flags);
4418 
4419 	return err;
4420 }
4421 
4422 /**
4423  * irdma_create_ah - create address handle
4424  * @ibah: address handle
4425  * @attr: address handle attributes
4426  * @udata: NULL
4427  *
4428  * returns 0 on success, error otherwise
4429  */
4430 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
4431 			   struct ib_udata *udata)
4432 {
4433 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4434 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4435 	int err;
4436 
4437 	err = irdma_setup_ah(ibah, attr);
4438 	if (err)
4439 		return err;
4440 	err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
4441 
4442 	return err;
4443 }
4444 
4445 /**
4446  * irdma_query_ah - Query address handle
4447  * @ibah: pointer to address handle
4448  * @ah_attr: address handle attributes
4449  */
4450 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
4451 {
4452 	struct irdma_ah *ah = to_iwah(ibah);
4453 
4454 	memset(ah_attr, 0, sizeof(*ah_attr));
4455 	if (ah->av.attrs.ah_flags & IB_AH_GRH) {
4456 		ah_attr->ah_flags = IB_AH_GRH;
4457 		ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
4458 		ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
4459 		ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
4460 		ah_attr->grh.sgid_index = ah->sgid_index;
4462 		memcpy(&ah_attr->grh.dgid, &ah->dgid,
4463 		       sizeof(ah_attr->grh.dgid));
4464 	}
4465 
4466 	return 0;
4467 }
4468 
4469 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
4470 						 u32 port_num)
4471 {
4472 	return IB_LINK_LAYER_ETHERNET;
4473 }
4474 
4475 static const struct ib_device_ops irdma_roce_dev_ops = {
4476 	.attach_mcast = irdma_attach_mcast,
4477 	.create_ah = irdma_create_ah,
4478 	.create_user_ah = irdma_create_user_ah,
4479 	.destroy_ah = irdma_destroy_ah,
4480 	.detach_mcast = irdma_detach_mcast,
4481 	.get_link_layer = irdma_get_link_layer,
4482 	.get_port_immutable = irdma_roce_port_immutable,
4483 	.modify_qp = irdma_modify_qp_roce,
4484 	.query_ah = irdma_query_ah,
4485 	.query_pkey = irdma_query_pkey,
4486 };
4487 
4488 static const struct ib_device_ops irdma_iw_dev_ops = {
4489 	.modify_qp = irdma_modify_qp,
4490 	.get_port_immutable = irdma_iw_port_immutable,
4491 	.query_gid = irdma_query_gid,
4492 };
4493 
4494 static const struct ib_device_ops irdma_dev_ops = {
4495 	.owner = THIS_MODULE,
4496 	.driver_id = RDMA_DRIVER_IRDMA,
4497 	.uverbs_abi_ver = IRDMA_ABI_VER,
4498 
4499 	.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
4500 	.alloc_mr = irdma_alloc_mr,
4501 	.alloc_mw = irdma_alloc_mw,
4502 	.alloc_pd = irdma_alloc_pd,
4503 	.alloc_ucontext = irdma_alloc_ucontext,
4504 	.create_cq = irdma_create_cq,
4505 	.create_qp = irdma_create_qp,
4506 	.dealloc_driver = irdma_ib_dealloc_device,
4507 	.dealloc_mw = irdma_dealloc_mw,
4508 	.dealloc_pd = irdma_dealloc_pd,
4509 	.dealloc_ucontext = irdma_dealloc_ucontext,
4510 	.dereg_mr = irdma_dereg_mr,
4511 	.destroy_cq = irdma_destroy_cq,
4512 	.destroy_qp = irdma_destroy_qp,
4513 	.disassociate_ucontext = irdma_disassociate_ucontext,
4514 	.get_dev_fw_str = irdma_get_dev_fw_str,
4515 	.get_dma_mr = irdma_get_dma_mr,
4516 	.get_hw_stats = irdma_get_hw_stats,
4517 	.map_mr_sg = irdma_map_mr_sg,
4518 	.mmap = irdma_mmap,
4519 	.mmap_free = irdma_mmap_free,
4520 	.poll_cq = irdma_poll_cq,
4521 	.post_recv = irdma_post_recv,
4522 	.post_send = irdma_post_send,
4523 	.query_device = irdma_query_device,
4524 	.query_port = irdma_query_port,
4525 	.query_qp = irdma_query_qp,
4526 	.reg_user_mr = irdma_reg_user_mr,
4527 	.reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
4528 	.req_notify_cq = irdma_req_notify_cq,
4529 	.resize_cq = irdma_resize_cq,
4530 	INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
4531 	INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
4532 	INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
4533 	INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
4534 	INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
4535 	INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
4536 };
4537 
4538 /**
4539  * irdma_init_roce_device - initialization of roce rdma device
4540  * @iwdev: irdma device
4541  */
4542 static void irdma_init_roce_device(struct irdma_device *iwdev)
4543 {
4544 	iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
4545 	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4546 			    iwdev->netdev->dev_addr);
4547 	ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
4548 }
4549 
4550 /**
4551  * irdma_init_iw_device - initialization of iwarp rdma device
4552  * @iwdev: irdma device
4553  */
4554 static int irdma_init_iw_device(struct irdma_device *iwdev)
4555 {
4556 	struct net_device *netdev = iwdev->netdev;
4557 
4558 	iwdev->ibdev.node_type = RDMA_NODE_RNIC;
4559 	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4560 			    netdev->dev_addr);
4561 	iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
4562 	iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
4563 	iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
4564 	iwdev->ibdev.ops.iw_connect = irdma_connect;
4565 	iwdev->ibdev.ops.iw_accept = irdma_accept;
4566 	iwdev->ibdev.ops.iw_reject = irdma_reject;
4567 	iwdev->ibdev.ops.iw_create_listen = irdma_create_listen;
4568 	iwdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen;
4569 	memcpy(iwdev->ibdev.iw_ifname, netdev->name,
4570 	       sizeof(iwdev->ibdev.iw_ifname));
4571 	ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
4572 
4573 	return 0;
4574 }
4575 
4576 /**
4577  * irdma_init_rdma_device - initialization of rdma device
4578  * @iwdev: irdma device
4579  */
4580 static int irdma_init_rdma_device(struct irdma_device *iwdev)
4581 {
4582 	struct pci_dev *pcidev = iwdev->rf->pcidev;
4583 	int ret;
4584 
4585 	if (iwdev->roce_mode) {
4586 		irdma_init_roce_device(iwdev);
4587 	} else {
4588 		ret = irdma_init_iw_device(iwdev);
4589 		if (ret)
4590 			return ret;
4591 	}
4592 	iwdev->ibdev.phys_port_cnt = 1;
4593 	iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
4594 	iwdev->ibdev.dev.parent = &pcidev->dev;
4595 	ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
4596 
4597 	return 0;
4598 }
4599 
4600 /**
4601  * irdma_port_ibevent - indicate port event
4602  * @iwdev: irdma device
4603  */
4604 void irdma_port_ibevent(struct irdma_device *iwdev)
4605 {
4606 	struct ib_event event;
4607 
4608 	event.device = &iwdev->ibdev;
4609 	event.element.port_num = 1;
4610 	event.event =
4611 		iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4612 	ib_dispatch_event(&event);
4613 }
4614 
4615 /**
4616  * irdma_ib_unregister_device - unregister rdma device from IB core
4618  * @iwdev: irdma device
4619  */
4620 void irdma_ib_unregister_device(struct irdma_device *iwdev)
4621 {
4622 	iwdev->iw_status = 0;
4623 	irdma_port_ibevent(iwdev);
4624 	ib_unregister_device(&iwdev->ibdev);
4625 }
4626 
4627 /**
4628  * irdma_ib_register_device - register irdma device to IB core
4629  * @iwdev: irdma device
4630  */
4631 int irdma_ib_register_device(struct irdma_device *iwdev)
4632 {
4633 	int ret;
4634 
4635 	ret = irdma_init_rdma_device(iwdev);
4636 	if (ret)
4637 		return ret;
4638 
4639 	ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
4640 	if (ret)
4641 		goto error;
4642 	dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
4643 	ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
4644 	if (ret)
4645 		goto error;
4646 
4647 	iwdev->iw_status = 1;
4648 	irdma_port_ibevent(iwdev);
4649 
4650 	return 0;
4651 
4652 error:
4653 	if (ret)
4654 		ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
4655 
4656 	return ret;
4657 }
4658 
4659 /**
4660  * irdma_ib_dealloc_device - deallocate device resources
4661  * @ibdev: ib device
4662  *
4663  * callback from ibdev dealloc_driver to deallocate resources
4664  * under the irdma device
4665  */
4666 void irdma_ib_dealloc_device(struct ib_device *ibdev)
4667 {
4668 	struct irdma_device *iwdev = to_iwdev(ibdev);
4669 
4670 	irdma_rt_deinit_hw(iwdev);
4671 	irdma_ctrl_deinit_hw(iwdev->rf);
4672 	kfree(iwdev->rf);
4673 }
4674