// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"

/**
 * irdma_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int irdma_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_pci_f *rf = iwdev->rf;
	struct pci_dev *pcidev = iwdev->rf->pcidev;
	struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	addrconf_addr_eui48((u8 *)&props->sys_image_guid,
			    iwdev->netdev->dev_addr);
	props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
			irdma_fw_minor_ver(&rf->sc_dev);
	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
				  IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (hw_attrs->uk_attrs.hw_rev < IRDMA_GEN_3)
		props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	props->vendor_id = pcidev->vendor;
	props->vendor_part_id = pcidev->device;

	props->hw_ver = rf->pcidev->revision;
	props->page_size_cap = hw_attrs->page_size_cap;
	props->max_mr_size = hw_attrs->max_mr_size;
	props->max_qp = rf->max_qp - rf->used_qps;
	props->max_qp_wr = hw_attrs->max_qp_wr;
	props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
	props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
	props->max_cq = rf->max_cq - rf->used_cqs;
	props->max_cqe = rf->max_cqe - 1;
	props->max_mr = rf->max_mr - rf->used_mrs;
	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
		props->max_mw = props->max_mr;
	props->max_pd = rf->max_pd - rf->used_pds;
	props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
	props->max_qp_rd_atom = hw_attrs->max_hw_ird;
	props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
	if (rdma_protocol_roce(ibdev, 1)) {
		props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
		props->max_pkeys = IRDMA_PKEY_TBL_SZ;
	}

	props->max_ah = rf->max_ah;
	props->max_mcast_grp = rf->max_mcg;
	props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
	props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
	props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
	props->max_srq = rf->max_srq - rf->used_srqs;
	props->max_srq_wr = IRDMA_MAX_SRQ_WRS;
	props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
	if (hw_attrs->uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
		props->atomic_cap = IB_ATOMIC_HCA;
	else
		props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) {
#define HCA_CORE_CLOCK_KHZ 1000000UL
		props->timestamp_mask = GENMASK(31, 0);
		props->hca_core_clock = HCA_CORE_CLOCK_KHZ;
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
	}

	return 0;
}
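
/*
 * Illustrative sketch, not part of this driver: the RDMA core invokes
 * .query_device once at device registration and caches the result in
 * ibdev->attrs, which is what a libibverbs consumer ultimately sees.
 * Assuming "ctx" is an ibv_context opened on an irdma device:
 *
 *	struct ibv_device_attr dev_attr;
 *
 *	if (!ibv_query_device(ctx, &dev_attr))
 *		printf("max_qp=%d max_cqe=%d max_mr=%d\n",
 *		       dev_attr.max_qp, dev_attr.max_cqe, dev_attr.max_mr);
 */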

/**
 * irdma_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
 */
static int irdma_query_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct net_device *netdev = iwdev->netdev;

	/* no need to zero out props here, done by caller */

	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
	props->lid = 1;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	if (netif_carrier_ok(netdev) && netif_running(netdev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	ib_get_eth_speed(ibdev, port, &props->active_speed,
			 &props->active_width);

	if (rdma_protocol_roce(ibdev, 1)) {
		props->gid_tbl_len = 32;
		props->ip_gids = true;
		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
	} else {
		props->gid_tbl_len = 1;
	}
	props->qkey_viol_cntr = 0;
	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;

	return 0;
}
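
/*
 * Illustrative sketch, not part of this driver: the consumer-side view
 * through libibverbs, assuming "ctx" is an open ibv_context. The
 * returned state mirrors the netif_carrier_ok()/netif_running() check
 * above:
 *
 *	struct ibv_port_attr port_attr;
 *
 *	if (!ibv_query_port(ctx, 1, &port_attr) &&
 *	    port_attr.state == IBV_PORT_ACTIVE)
 *		printf("link up, active_mtu enum=%d\n", port_attr.active_mtu);
 */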

/**
 * irdma_disassociate_ucontext - Disassociate user context
 * @context: ib user context
 */
static void irdma_disassociate_ucontext(struct ib_ucontext *context)
{
}

static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
			     struct vm_area_struct *vma)
{
	u64 pfn;

	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_private_data = ucontext;
	pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;

	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot), NULL);
}

static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);

	kfree(entry);
}

static struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
{
	struct irdma_user_mmap_entry *entry =
		kzalloc(sizeof(*entry), GFP_KERNEL);
	int ret;

	if (!entry)
		return NULL;

	entry->bar_offset = bar_offset;
	entry->mmap_flag = mmap_flag;

	ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
					  &entry->rdma_entry, PAGE_SIZE);
	if (ret) {
		kfree(entry);
		return NULL;
	}
	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

/**
 * irdma_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct irdma_user_mmap_entry *entry;
	struct irdma_ucontext *ucontext;
	u64 pfn;
	int ret;

	ucontext = to_ucontext(context);

	/* Legacy support for libi40iw with hard-coded mmap key */
	if (ucontext->legacy_mode)
		return irdma_mmap_legacy(ucontext, vma);

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&ucontext->iwdev->ibdev,
			  "VERBS: pgoff[0x%lx] does not have valid entry\n",
			  vma->vm_pgoff);
		return -EINVAL;
	}

	entry = to_irdma_mmap_entry(rdma_entry);
	ibdev_dbg(&ucontext->iwdev->ibdev,
		  "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
		  entry->bar_offset, entry->mmap_flag);

	pfn = (entry->bar_offset +
	      pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;

	switch (entry->mmap_flag) {
	case IRDMA_MMAP_IO_NC:
		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case IRDMA_MMAP_IO_WC:
		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		ibdev_dbg(&ucontext->iwdev->ibdev,
			  "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
			  entry->bar_offset, entry->mmap_flag, ret);
	rdma_user_mmap_entry_put(rdma_entry);

	return ret;
}
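
/*
 * Illustrative sketch, not part of this driver: userspace reaches this
 * handler via plain mmap(2) on the uverbs command fd, passing a key
 * produced by irdma_user_mmap_entry_insert() (e.g. db_mmap_key from
 * the alloc_ucontext response) as the file offset. "cmd_fd" and
 * "db_mmap_key" below are assumed to come from the provider library:
 *
 *	void *db = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_WRITE,
 *			MAP_SHARED, cmd_fd, db_mmap_key);
 *	if (db == MAP_FAILED)
 *		err(1, "doorbell mmap");
 */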

/**
 * irdma_alloc_push_page - allocate a push page for qp
 * @iwqp: qp pointer
 */
static void irdma_alloc_push_page(struct irdma_qp *iwqp)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_qp *qp = &iwqp->sc_qp;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_push_page.info.push_idx = 0;
	cqp_info->in.u.manage_push_page.info.qs_handle =
		qp->vsi->qos[qp->user_pri].qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	if (!status && cqp_request->compl_info.op_ret_val <
	    iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
		qp->push_idx = cqp_request->compl_info.op_ret_val;
		qp->push_offset = 0;
	}

	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
}
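
/*
 * A successful IRDMA_OP_MANAGE_PUSH_PAGE leaves qp->push_idx naming a
 * write-combining page in the doorbell BAR; small SQ WQEs can then be
 * pushed directly across PCIe rather than fetched by the device after
 * a doorbell ring. The page is exposed to userspace through
 * irdma_setup_push_mmap_entries().
 */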

/**
 * irdma_alloc_ucontext - Allocate the user context data structure
 * @uctx: uverbs context pointer
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
				struct ib_udata *udata)
{
#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
	struct ib_device *ibdev = uctx->device;
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_alloc_ucontext_req req = {};
	struct irdma_alloc_ucontext_resp uresp = {};
	struct irdma_ucontext *ucontext = to_ucontext(uctx);
	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;

	if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
	    udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
		return -EINVAL;

	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
		return -EINVAL;

	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
		goto ver_error;

	ucontext->iwdev = iwdev;
	ucontext->abi_ver = req.userspace_ver;

	if (!(req.comp_mask & IRDMA_SUPPORT_WQE_FORMAT_V2) &&
	    uk_attrs->hw_rev >= IRDMA_GEN_3)
		return -EOPNOTSUPP;

	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
		ucontext->use_raw_attrs = true;

	/* GEN_1 legacy support with libi40iw */
	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
		if (uk_attrs->hw_rev != IRDMA_GEN_1)
			return -EOPNOTSUPP;

		ucontext->legacy_mode = true;
		uresp.max_qps = iwdev->rf->max_qp;
		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
		uresp.kernel_ver = req.userspace_ver;
		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen)))
			return -EFAULT;
	} else {
		u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];

		ucontext->db_mmap_entry =
			irdma_user_mmap_entry_insert(ucontext, bar_off,
						     IRDMA_MMAP_IO_NC,
						     &uresp.db_mmap_key);
		if (!ucontext->db_mmap_entry)
			return -ENOMEM;

		uresp.kernel_ver = IRDMA_ABI_VER;
		uresp.feature_flags = uk_attrs->feature_flags;
		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
		uresp.max_hw_inline = uk_attrs->max_hw_inline;
		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
		uresp.hw_rev = uk_attrs->hw_rev;
		uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
		uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
		uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
		uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta;
		uresp.comp_mask |= IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA;
		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen))) {
			rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
			return -EFAULT;
		}
	}

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->srq_reg_mem_list);
	spin_lock_init(&ucontext->srq_reg_mem_list_lock);

	return 0;

ver_error:
	ibdev_err(&iwdev->ibdev,
		  "Invalid userspace driver version detected. Detected version %d, should be %d\n",
		  req.userspace_ver, IRDMA_ABI_VER);
	return -EINVAL;
}
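
/*
 * The MIN_REQ/MIN_RESP length checks above rely on offsetofend() from
 * include/linux/stddef.h, which measures a struct up to and including
 * a given member:
 *
 *	#define offsetofend(TYPE, MEMBER) \
 *		(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
 *
 * so a request is accepted as long as it carries at least the fields
 * through rsvd8, letting longer ABI structs from newer libraries stay
 * compatible.
 */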

/**
 * irdma_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
static void irdma_dealloc_ucontext(struct ib_ucontext *context)
{
	struct irdma_ucontext *ucontext = to_ucontext(context);

	rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
}

/**
 * irdma_alloc_pd - allocate protection domain
 * @pd: PD pointer
 * @udata: user data
 */
static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
	struct irdma_pd *iwpd = to_iwpd(pd);
	struct irdma_device *iwdev = to_iwdev(pd->device);
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_alloc_pd_resp uresp = {};
	struct irdma_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
		return -EINVAL;

	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
			       &rf->next_pd);
	if (err)
		return err;

	sc_pd = &iwpd->sc_pd;
	if (udata) {
		struct irdma_ucontext *ucontext =
			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
						  ibucontext);
		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp,
				     min(sizeof(uresp), udata->outlen))) {
			err = -EFAULT;
			goto error;
		}
	} else {
		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
	}

	return 0;
error:
	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);

	return err;
}
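
/*
 * Illustrative sketch, not part of this driver: the userspace side is
 * a single libibverbs call; the pd_id returned in uresp above is what
 * the provider library keeps alongside the ibv_pd handle.
 *
 *	struct ibv_pd *pd = ibv_alloc_pd(ctx);
 *
 *	if (!pd)
 *		err(1, "ibv_alloc_pd");
 */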

/**
 * irdma_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 * @udata: user data
 */
static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct irdma_pd *iwpd = to_iwpd(ibpd);
	struct irdma_device *iwdev = to_iwdev(ibpd->device);

	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);

	return 0;
}

/**
 * irdma_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 */
static struct irdma_pbl *irdma_get_pbl(unsigned long va,
				       struct list_head *pbl_list)
{
	struct irdma_pbl *iwpbl;

	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			list_del(&iwpbl->list);
			iwpbl->on_list = false;
			return iwpbl;
		}
	}

	return NULL;
}

/**
 * irdma_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
{
	struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
	unsigned long flags;

	spin_lock_irqsave(&iwcq->lock, flags);
	irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
	spin_unlock_irqrestore(&iwcq->lock, flags);
}

static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
{
	if (iwqp->push_db_mmap_entry) {
		rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
		iwqp->push_db_mmap_entry = NULL;
	}
	if (iwqp->push_wqe_mmap_entry) {
		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
		iwqp->push_wqe_mmap_entry = NULL;
	}
}

static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
					 struct irdma_qp *iwqp,
					 u64 *push_wqe_mmap_key,
					 u64 *push_db_mmap_key)
{
	struct irdma_device *iwdev = ucontext->iwdev;
	u64 rsvd, bar_off;

	rsvd = IRDMA_PF_BAR_RSVD;
	bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
	/* skip over db page */
	bar_off += IRDMA_HW_PAGE_SIZE;
	/* push wqe page */
	bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
	iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
					bar_off, IRDMA_MMAP_IO_WC,
					push_wqe_mmap_key);
	if (!iwqp->push_wqe_mmap_entry)
		return -ENOMEM;

	/* push doorbell page */
	bar_off += IRDMA_HW_PAGE_SIZE;
	iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
					bar_off, IRDMA_MMAP_IO_NC,
					push_db_mmap_key);
	if (!iwqp->push_db_mmap_entry) {
		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
		return -ENOMEM;
	}

	return 0;
}
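
/*
 * Doorbell BAR layout implied by the offset arithmetic above, relative
 * to hw_regs[IRDMA_DB_ADDR_OFFSET]:
 *
 *	+0					db page (mapped IRDMA_MMAP_IO_NC)
 *	+IRDMA_HW_PAGE_SIZE			IRDMA_PF_BAR_RSVD reserved bytes
 *	+IRDMA_HW_PAGE_SIZE + IRDMA_PF_BAR_RSVD
 *	 + push_idx * IRDMA_HW_PAGE_SIZE	push WQE page (IRDMA_MMAP_IO_WC)
 *	... + IRDMA_HW_PAGE_SIZE		push doorbell page (IRDMA_MMAP_IO_NC)
 */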

/**
 * irdma_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 * @udata: user data
 */
static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;

	iwqp->sc_qp.qp_uk.destroy_pending = true;

	if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE)
		irdma_modify_qp_to_err(&iwqp->sc_qp);

	if (!iwqp->user_mode) {
		cancel_delayed_work_sync(&iwqp->dwork_flush);

		if (iwqp->iwscq) {
			irdma_clean_cqes(iwqp, iwqp->iwscq);
			if (iwqp->iwrcq != iwqp->iwscq)
				irdma_clean_cqes(iwqp, iwqp->iwrcq);
		}
	}

	irdma_qp_rem_ref(&iwqp->ibqp);
	if (!iwdev->rf->reset)
		wait_for_completion(&iwqp->free_qp);
	irdma_free_lsmm_rsrc(iwqp);
	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);

	irdma_remove_push_mmap_entries(iwqp);

	if (iwqp->sc_qp.qp_uk.qp_id == 1)
		iwdev->rf->hwqp1_rsvd = false;
	irdma_free_qp_rsrc(iwqp);

	return 0;
}

/**
 * irdma_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: irdma device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
static void irdma_setup_virt_qp(struct irdma_device *iwdev,
				struct irdma_qp *iwqp,
				struct irdma_qp_init_info *init_info)
{
	struct irdma_pbl *iwpbl = iwqp->iwpbl;
	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = qpmr->shadow;
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		/* Need to use contiguous buffer for RQ of QP
		 * in case it is associated with SRQ.
		 */
		init_info->rq_pa = init_info->qp_uk_init_info.srq_uk ?
			qpmr->rq_pa : qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
}

/**
 * irdma_setup_umode_qp - setup sq and rq size in user mode qp
 * @udata: udata
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 * @init_attr: Initial QP create attributes
 */
static int irdma_setup_umode_qp(struct ib_udata *udata,
				struct irdma_device *iwdev,
				struct irdma_qp *iwqp,
				struct irdma_qp_init_info *info,
				struct ib_qp_init_attr *init_attr)
{
	struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
				struct irdma_ucontext, ibucontext);
	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
	struct irdma_create_qp_req req;
	unsigned long flags;
	int ret;

	ret = ib_copy_from_udata(&req, udata,
				 min(sizeof(req), udata->inlen));
	if (ret) {
		ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_udata fail\n");
		return ret;
	}

	iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
	iwqp->user_mode = 1;
	if (req.user_wqe_bufs) {
		info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
					    &ucontext->qp_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

		if (!iwqp->iwpbl) {
			ret = -ENODATA;
			ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
			return ret;
		}
	}

	if (!ucontext->use_raw_attrs) {
		/*
		 * Maintain backward compat with older ABI which passes sq and
		 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
		 * There is no way to compute the correct value of
		 * iwqp->max_send_wr/max_recv_wr in the kernel.
		 */
		iwqp->max_send_wr = init_attr->cap.max_send_wr;
		iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
		ukinfo->sq_size = init_attr->cap.max_send_wr;
		ukinfo->rq_size = init_attr->cap.max_recv_wr;
		irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
				       &ukinfo->rq_shift);
	} else {
		ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
						   &ukinfo->sq_shift);
		if (ret)
			return ret;

		ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
						   &ukinfo->rq_shift);
		if (ret)
			return ret;

		iwqp->max_send_wr =
			(ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
		iwqp->max_recv_wr =
			(ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
		ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
		ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
	}

	irdma_setup_virt_qp(iwdev, iwqp, info);

	return 0;
}

/**
 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 * @init_attr: Initial QP create attributes
 */
static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
				struct irdma_qp *iwqp,
				struct irdma_qp_init_info *info,
				struct ib_qp_init_attr *init_attr)
{
	struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 size;
	int status;
	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
					      &ukinfo->sq_shift);
	if (status)
		return status;

	status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
					      &ukinfo->rq_shift);
	if (status)
		return status;

	iwqp->kqp.sq_wrid_mem =
		kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem),
			GFP_KERNEL);
	if (!iwqp->kqp.sq_wrid_mem)
		return -ENOMEM;

	iwqp->kqp.rq_wrid_mem =
		kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem),
			GFP_KERNEL);

	if (!iwqp->kqp.rq_wrid_mem) {
		kfree(iwqp->kqp.sq_wrid_mem);
		iwqp->kqp.sq_wrid_mem = NULL;
		return -ENOMEM;
	}

	ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
	ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;

	size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
	size += (IRDMA_SHADOW_AREA_SIZE << 3);

	mem->size = ALIGN(size, 256);
	mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
				     &mem->pa, GFP_KERNEL);
	if (!mem->va) {
		kfree(iwqp->kqp.sq_wrid_mem);
		iwqp->kqp.sq_wrid_mem = NULL;
		kfree(iwqp->kqp.rq_wrid_mem);
		iwqp->kqp.rq_wrid_mem = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;
	ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
	info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
	ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
	info->shadow_area_pa =
		info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
	ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
	ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
	ukinfo->qp_id = info->qp_uk_init_info.qp_id;

	iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
	iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
	init_attr->cap.max_send_wr = iwqp->max_send_wr;
	init_attr->cap.max_recv_wr = iwqp->max_recv_wr;

	return 0;
}
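
/*
 * The single coherent allocation above is carved up as:
 *
 *	mem->va					SQ: sq_depth * IRDMA_QP_WQE_MIN_SIZE
 *	+ SQ bytes				RQ: rq_depth * IRDMA_QP_WQE_MIN_SIZE
 *	+ RQ bytes				shadow area: IRDMA_SHADOW_AREA_SIZE << 3
 *
 * which is why info->rq_pa and info->shadow_area_pa are computed as
 * simple offsets from info->sq_pa.
 */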

static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
{
	struct irdma_pci_f *rf = iwqp->iwdev->rf;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_create_qp_info *qp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;
	qp_info->mac_valid = true;
	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;

	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
					       struct irdma_qp_host_ctx_info *ctx_info)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_roce_offload_info *roce_info;
	struct irdma_udp_offload_info *udp_info;

	udp_info = &iwqp->udp_info;
	udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
	udp_info->cwnd = iwdev->roce_cwnd;
	udp_info->rexmit_thresh = 2;
	udp_info->rnr_nak_thresh = 2;
	udp_info->src_port = 0xc000;
	udp_info->dst_port = ROCE_V2_UDP_DPORT;
	roce_info = &iwqp->roce_info;
	ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);

	if (iwqp->ibqp.qp_type == IB_QPT_GSI && iwqp->ibqp.qp_num != 1)
		roce_info->is_qp1 = true;
	roce_info->rd_en = true;
	roce_info->wr_rdresp_en = true;
	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
		roce_info->bind_en = true;
	roce_info->dcqcn_en = false;
	roce_info->rtomin = 5;

	roce_info->ack_credits = iwdev->roce_ackcreds;
	roce_info->ird_size = dev->hw_attrs.max_hw_ird;
	roce_info->ord_size = dev->hw_attrs.max_hw_ord;

	if (!iwqp->user_mode) {
		roce_info->priv_mode_en = true;
		roce_info->fast_reg_en = true;
		roce_info->udprivcq_en = true;
	}
	roce_info->roce_tver = 0;

	ctx_info->roce_info = &iwqp->roce_info;
	ctx_info->udp_info = &iwqp->udp_info;
	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
}

static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
					     struct irdma_qp_host_ctx_info *ctx_info)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_iwarp_offload_info *iwarp_info;

	iwarp_info = &iwqp->iwarp_info;
	ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
	iwarp_info->rd_en = true;
	iwarp_info->wr_rdresp_en = true;
	iwarp_info->ecn_en = true;
	iwarp_info->rtomin = 5;

	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
		iwarp_info->ib_rd_en = true;
	if (!iwqp->user_mode) {
		iwarp_info->priv_mode_en = true;
		iwarp_info->fast_reg_en = true;
	}
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info = &iwqp->iwarp_info;
	ctx_info->iwarp_info_valid = true;
	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	ctx_info->iwarp_info_valid = false;
}

static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
				   struct irdma_device *iwdev)
{
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;

	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
	    init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
	    init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
	    init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
	    init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
		return -EINVAL;

	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
		if (init_attr->qp_type != IB_QPT_RC &&
		    init_attr->qp_type != IB_QPT_UD &&
		    init_attr->qp_type != IB_QPT_GSI)
			return -EOPNOTSUPP;
	} else {
		if (init_attr->qp_type != IB_QPT_RC)
			return -EOPNOTSUPP;
	}

	return 0;
}

static void irdma_flush_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);

	irdma_generate_flush_completions(iwqp);
}

static int irdma_setup_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 *qp_num)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_pci_f *rf = iwdev->rf;
	unsigned long flags;
	int ret;

	if (rf->rdma_ver <= IRDMA_GEN_2) {
		*qp_num = 1;
		return 0;
	}

	spin_lock_irqsave(&rf->rsrc_lock, flags);
	if (!rf->hwqp1_rsvd) {
		*qp_num = 1;
		rf->hwqp1_rsvd = true;
		spin_unlock_irqrestore(&rf->rsrc_lock, flags);
	} else {
		spin_unlock_irqrestore(&rf->rsrc_lock, flags);
		ret = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
				       qp_num, &rf->next_qp);
		if (ret)
			return ret;
	}

	ret = irdma_vchnl_req_add_vport(&rf->sc_dev, iwdev->vport_id, *qp_num,
					iwdev->vsi.qos);
	if (ret) {
		if (*qp_num != 1) {
			irdma_free_rsrc(rf, rf->allocated_qps, *qp_num);
		} else {
			spin_lock_irqsave(&rf->rsrc_lock, flags);
			rf->hwqp1_rsvd = false;
			spin_unlock_irqrestore(&rf->rsrc_lock, flags);
		}
		return ret;
	}

	return 0;
}

/**
 * irdma_create_qp - create qp
 * @ibqp: ptr of qp
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
static int irdma_create_qp(struct ib_qp *ibqp,
			   struct ib_qp_init_attr *init_attr,
			   struct ib_udata *udata)
{
#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
	struct ib_pd *ibpd = ibqp->pd;
	struct irdma_pd *iwpd = to_iwpd(ibpd);
	struct irdma_device *iwdev = to_iwdev(ibpd->device);
	struct irdma_pci_f *rf = iwdev->rf;
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_create_qp_resp uresp = {};
	u32 qp_num = 0;
	int err_code;
	struct irdma_sc_qp *qp;
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
	struct irdma_qp_init_info init_info = {};
	struct irdma_qp_host_ctx_info *ctx_info;
	struct irdma_srq *iwsrq;
	bool srq_valid = false;
	u32 srq_id = 0;

	if (init_attr->srq) {
		iwsrq = to_iwsrq(init_attr->srq);
		srq_valid = true;
		srq_id = iwsrq->srq_num;
		init_attr->cap.max_recv_sge = uk_attrs->max_hw_wq_frags;
		init_attr->cap.max_recv_wr = 4;
		init_info.qp_uk_init_info.srq_uk = &iwsrq->sc_srq.srq_uk;
	}

	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
	if (err_code)
		return err_code;

	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
		return -EINVAL;

	init_info.vsi = &iwdev->vsi;
	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
	init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
	init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

	qp = &iwqp->sc_qp;
	qp->qp_uk.back_qp = iwqp;
	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;

	iwqp->iwdev = iwdev;
	iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
				      256);
	iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
						 iwqp->q2_ctx_mem.size,
						 &iwqp->q2_ctx_mem.pa,
						 GFP_KERNEL);
	if (!iwqp->q2_ctx_mem.va)
		return -ENOMEM;

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
	init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
	init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;

	if (init_attr->qp_type == IB_QPT_GSI) {
		err_code = irdma_setup_gsi_qp_rsrc(iwqp, &qp_num);
		if (err_code)
			goto error;
		iwqp->ibqp.qp_num = 1;
	} else {
		err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
					    &qp_num, &rf->next_qp);
		if (err_code)
			goto error;
		iwqp->ibqp.qp_num = qp_num;
	}

	iwqp->iwpd = iwpd;
	qp = &iwqp->sc_qp;
	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = qp_num;
	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
		init_info.qp_uk_init_info.first_sq_wq = 1;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
	init_waitqueue_head(&iwqp->waitq);
	init_waitqueue_head(&iwqp->mod_qp_waitq);

	if (udata) {
		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
		err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
						init_attr);
	} else {
		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
	}

	if (err_code) {
		ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
		goto error;
	}

	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
		if (init_attr->qp_type == IB_QPT_RC) {
			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
							    IRDMA_WRITE_WITH_IMM |
							    IRDMA_ROCE;
		} else {
			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
							    IRDMA_ROCE;
		}
	} else {
		init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
		init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
	}

	if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
		init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;

	err_code = irdma_sc_qp_init(qp, &init_info);
	if (err_code) {
		ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
		goto error;
	}

	ctx_info = &iwqp->ctx_info;
	ctx_info->srq_valid = srq_valid;
	ctx_info->srq_id = srq_id;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;

	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
		if (dev->ws_add(&iwdev->vsi, 0)) {
			irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
			err_code = -EINVAL;
			goto error;
		}
		irdma_qp_add_qos(&iwqp->sc_qp);
		irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
	} else {
		irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
	}

	err_code = irdma_cqp_create_qp_cmd(iwqp);
	if (err_code)
		goto error;

	refcount_set(&iwqp->refcnt, 1);
	spin_lock_init(&iwqp->lock);
	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
	iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
	rf->qp_table[qp_num] = iwqp;
	init_completion(&iwqp->free_qp);

	if (udata) {
		/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
		if (udata->outlen < sizeof(uresp)) {
			uresp.lsmm = 1;
			uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
		} else {
			if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
				uresp.lsmm = 1;
		}
		uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
		uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
		uresp.qp_id = qp_num;
		uresp.qp_caps = qp->qp_uk.qp_caps;

		err_code = ib_copy_to_udata(udata, &uresp,
					    min(sizeof(uresp), udata->outlen));
		if (err_code) {
			ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
			irdma_destroy_qp(&iwqp->ibqp, udata);
			return err_code;
		}
	}

	return 0;

error:
	irdma_free_qp_rsrc(iwqp);
	return err_code;
}
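
/*
 * Illustrative sketch, not part of this driver: a minimal libibverbs
 * path into this function, assuming "pd" and "cq" were created
 * earlier. The driver may round the queue sizes up; the final values
 * are reported back in uresp.actual_sq_size/actual_rq_size.
 *
 *	struct ibv_qp_init_attr attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.qp_type = IBV_QPT_RC,
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
 */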

static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
{
	int acc_flags = 0;

	if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
		if (iwqp->roce_info.wr_rdresp_en) {
			acc_flags |= IB_ACCESS_LOCAL_WRITE;
			acc_flags |= IB_ACCESS_REMOTE_WRITE;
		}
		if (iwqp->roce_info.rd_en)
			acc_flags |= IB_ACCESS_REMOTE_READ;
		if (iwqp->roce_info.bind_en)
			acc_flags |= IB_ACCESS_MW_BIND;
		if (iwqp->ctx_info.remote_atomics_en)
			acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
	} else {
		if (iwqp->iwarp_info.wr_rdresp_en) {
			acc_flags |= IB_ACCESS_LOCAL_WRITE;
			acc_flags |= IB_ACCESS_REMOTE_WRITE;
		}
		if (iwqp->iwarp_info.rd_en)
			acc_flags |= IB_ACCESS_REMOTE_READ;
		if (iwqp->ctx_info.remote_atomics_en)
			acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
	}
	return acc_flags;
}

/**
 * irdma_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			  int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_sc_qp *qp = &iwqp->sc_qp;

	memset(attr, 0, sizeof(*attr));
	memset(init_attr, 0, sizeof(*init_attr));

	attr->qp_state = iwqp->ibqp_state;
	attr->cur_qp_state = iwqp->ibqp_state;
	attr->cap.max_send_wr = iwqp->max_send_wr;
	attr->cap.max_recv_wr = iwqp->max_recv_wr;
	attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
	attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
	attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
	attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
	attr->port_num = 1;
	if (rdma_protocol_roce(ibqp->device, 1)) {
		attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
		attr->qkey = iwqp->roce_info.qkey;
		attr->rq_psn = iwqp->udp_info.epsn;
		attr->sq_psn = iwqp->udp_info.psn_nxt;
		attr->dest_qp_num = iwqp->roce_info.dest_qp;
		attr->pkey_index = iwqp->roce_info.p_key;
		attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
		attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
		attr->min_rnr_timer = iwqp->udp_info.min_rnr_timer;
		attr->max_rd_atomic = iwqp->roce_info.ord_size;
		attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
	}

	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->srq = iwqp->ibqp.srq;
	init_attr->cap = attr->cap;

	return 0;
}

/**
 * irdma_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey)
{
	if (index >= IRDMA_PKEY_TBL_SZ)
		return -EINVAL;

	*pkey = IRDMA_DEFAULT_PKEY;
	return 0;
}

static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
{
	struct net_device *ndev;

	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev)
		goto exit;
	if (is_vlan_dev(ndev)) {
		u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);

		prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	}
exit:
	rcu_read_unlock();
	return prio;
}

static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
{
	if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
				!iwqp->suspend_pending,
				msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
		iwqp->suspend_pending = false;
		ibdev_warn(&iwqp->iwdev->ibdev,
			   "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
			   iwqp->ibqp.qp_num, iwqp->last_aeq);
		return -EBUSY;
	}

	return 0;
}

/**
 * irdma_modify_qp_roce - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_qp_host_ctx_info *ctx_info;
	struct irdma_roce_offload_info *roce_info;
	struct irdma_udp_offload_info *udp_info;
	struct irdma_modify_qp_info info = {};
	struct irdma_modify_qp_resp uresp = {};
	struct irdma_modify_qp_req ureq = {};
	unsigned long flags;
	u8 issue_modify_qp = 0;
	int ret = 0;

	ctx_info = &iwqp->ctx_info;
	roce_info = &iwqp->roce_info;
	udp_info = &iwqp->udp_info;

	if (udata) {
		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
			return -EINVAL;
	}

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	if (attr_mask & IB_QP_DEST_QPN)
		roce_info->dest_qp = attr->dest_qp_num;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
				       &roce_info->p_key);
		if (ret)
			return ret;
	}

	if (attr_mask & IB_QP_QKEY)
		roce_info->qkey = attr->qkey;

	if (attr_mask & IB_QP_PATH_MTU)
		udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);

	if (attr_mask & IB_QP_SQ_PSN) {
		udp_info->psn_nxt = attr->sq_psn;
		udp_info->lsn = 0xffff;
		udp_info->psn_una = attr->sq_psn;
		udp_info->psn_max = attr->sq_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		udp_info->epsn = attr->rq_psn;

	if (attr_mask & IB_QP_RNR_RETRY)
		udp_info->rnr_nak_thresh = attr->rnr_retry;

	if (attr_mask & IB_QP_MIN_RNR_TIMER &&
	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
		udp_info->min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_RETRY_CNT)
		udp_info->rexmit_thresh = attr->retry_cnt;

	ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;

	if (attr_mask & IB_QP_AV) {
		struct irdma_av *av = &iwqp->roce_ah.av;
		const struct ib_gid_attr *sgid_attr =
				attr->ah_attr.grh.sgid_attr;
		u16 vlan_id = VLAN_N_VID;
		u32 local_ip[4];

		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			udp_info->ttl = attr->ah_attr.grh.hop_limit;
			udp_info->flow_label = attr->ah_attr.grh.flow_label;
			udp_info->tos = attr->ah_attr.grh.traffic_class;
			udp_info->src_port =
				rdma_get_udp_sport(udp_info->flow_label,
						   ibqp->qp_num,
						   roce_info->dest_qp);
			irdma_qp_rem_qos(&iwqp->sc_qp);
			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
			if (iwqp->sc_qp.vsi->dscp_mode)
				ctx_info->user_pri =
				iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
			else
				ctx_info->user_pri = rt_tos2priority(udp_info->tos);
		}
		ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
					      ctx_info->roce_info->mac_addr);
		if (ret)
			return ret;
		ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
							      ctx_info->user_pri);
		if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
			return -ENOMEM;
		iwqp->sc_qp.user_pri = ctx_info->user_pri;
		irdma_qp_add_qos(&iwqp->sc_qp);

		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
			vlan_id = 0;
		if (vlan_id < VLAN_N_VID) {
			udp_info->insert_vlan_tag = true;
			udp_info->vlan_tag = vlan_id |
				ctx_info->user_pri << VLAN_PRIO_SHIFT;
		} else {
			udp_info->insert_vlan_tag = false;
		}

		av->attrs = attr->ah_attr;
		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
		av->net_type = rdma_gid_attr_network_type(sgid_attr);
		if (av->net_type == RDMA_NETWORK_IPV6) {
			__be32 *daddr =
				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
			__be32 *saddr =
				av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;

			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);

			udp_info->ipv4 = false;
			irdma_copy_ip_ntohl(local_ip, daddr);

		} else if (av->net_type == RDMA_NETWORK_IPV4) {
			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;

			local_ip[0] = ntohl(daddr);

			udp_info->ipv4 = true;
			udp_info->dest_ip_addr[0] = 0;
			udp_info->dest_ip_addr[1] = 0;
			udp_info->dest_ip_addr[2] = 0;
			udp_info->dest_ip_addr[3] = local_ip[0];

			udp_info->local_ipaddr[0] = 0;
			udp_info->local_ipaddr[1] = 0;
			udp_info->local_ipaddr[2] = 0;
			udp_info->local_ipaddr[3] = ntohl(saddr);
		}
		udp_info->arp_idx =
			irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
				      attr->ah_attr.roce.dmac);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
			ibdev_err(&iwdev->ibdev,
				  "rd_atomic = %d, above max_hw_ord=%d\n",
				  attr->max_rd_atomic,
				  dev->hw_attrs.max_hw_ord);
			return -EINVAL;
		}
		if (attr->max_rd_atomic)
			roce_info->ord_size = attr->max_rd_atomic;
		info.ord_valid = true;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
			ibdev_err(&iwdev->ibdev,
				  "rd_atomic = %d, above max_hw_ird=%d\n",
				   attr->max_dest_rd_atomic,
				   dev->hw_attrs.max_hw_ird);
			return -EINVAL;
		}
		if (attr->max_dest_rd_atomic)
			roce_info->ird_size = attr->max_dest_rd_atomic;
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			roce_info->rd_en = true;
		if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
			if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
				ctx_info->remote_atomics_en = true;
	}

	ibdev_dbg(&iwdev->ibdev,
		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
		  iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);

	spin_lock_irqsave(&iwqp->lock, flags);
	if (attr_mask & IB_QP_STATE) {
		if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
					iwqp->ibqp.qp_type, attr_mask)) {
			ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
				   iwqp->ibqp.qp_num, iwqp->ibqp_state,
				   attr->qp_state);
			ret = -EINVAL;
			goto exit;
		}
		info.curr_iwarp_state = iwqp->iwarp_state;

		switch (attr->qp_state) {
		case IB_QPS_INIT:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
				ret = -EINVAL;
				goto exit;
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
				ret = -EINVAL;
				goto exit;
			}
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			info.next_iwarp_state = IRDMA_QP_STATE_RTR;
			issue_modify_qp = 1;
			break;
		case IB_QPS_RTS:
			if (iwqp->ibqp_state < IB_QPS_RTR ||
			    iwqp->ibqp_state == IB_QPS_ERR) {
				ret = -EINVAL;
				goto exit;
			}

			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			info.ord_valid = true;
			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
			issue_modify_qp = 1;
			if (iwdev->push_mode && udata &&
			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_alloc_push_page(iwqp);
				spin_lock_irqsave(&iwqp->lock, flags);
			}
			break;
		case IB_QPS_SQD:
			if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
				goto exit;

			if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
				ret = -EINVAL;
				goto exit;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_SQD;
			issue_modify_qp = 1;
			iwqp->suspend_pending = true;
			break;
		case IB_QPS_SQE:
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
				iwqp->ibqp_state = attr->qp_state;
				spin_unlock_irqrestore(&iwqp->lock, flags);
				if (udata && udata->inlen) {
					if (ib_copy_from_udata(&ureq, udata,
					    min(sizeof(ureq), udata->inlen)))
						return -EINVAL;

					irdma_flush_wqes(iwqp,
					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
					    IRDMA_REFLUSH);
				}
				return 0;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
			issue_modify_qp = 1;
			break;
		default:
			ret = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;
	}

	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (issue_modify_qp) {
			ctx_info->rem_endpoint_idx = udp_info->arp_idx;
			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
				return -EINVAL;
			if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
				ret = irdma_wait_for_suspend(iwqp);
				if (ret)
					return ret;
			}
			spin_lock_irqsave(&iwqp->lock, flags);
			if (iwqp->iwarp_state == info.curr_iwarp_state) {
				iwqp->iwarp_state = info.next_iwarp_state;
				iwqp->ibqp_state = attr->qp_state;
			}
			if (iwqp->ibqp_state > IB_QPS_RTS &&
			    !iwqp->flush_issued) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
						       IRDMA_FLUSH_RQ |
						       IRDMA_FLUSH_WAIT);
				iwqp->flush_issued = 1;
			} else {
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
		} else {
			iwqp->ibqp_state = attr->qp_state;
		}
		if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
			struct irdma_ucontext *ucontext;

			ucontext = rdma_udata_to_drv_context(udata,
					struct irdma_ucontext, ibucontext);
			if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
			    !iwqp->push_wqe_mmap_entry &&
			    !irdma_setup_push_mmap_entries(ucontext, iwqp,
				&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
				uresp.push_valid = 1;
				uresp.push_offset = iwqp->sc_qp.push_offset;
			}
			ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
					       udata->outlen));
			if (ret) {
				irdma_remove_push_mmap_entries(iwqp);
				ibdev_dbg(&iwdev->ibdev,
					  "VERBS: copy_to_udata failed\n");
				return ret;
			}
		}
	}

	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return ret;
}
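
/*
 * Illustrative sketch, not part of this driver: the standard RC
 * connect sequence that drives the state machine above from
 * userspace. remote_qpn, path MTU, and PSNs are assumed to come from
 * an out-of-band exchange:
 *
 *	struct ibv_qp_attr a = {
 *		.qp_state = IBV_QPS_INIT, .pkey_index = 0, .port_num = 1,
 *		.qp_access_flags = IBV_ACCESS_REMOTE_WRITE,
 *	};
 *	ibv_modify_qp(qp, &a, IBV_QP_STATE | IBV_QP_PKEY_INDEX |
 *			      IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
 *
 * then IBV_QPS_RTR with IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
 * IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC | IBV_QP_MIN_RNR_TIMER,
 * and finally IBV_QPS_RTS with IBV_QP_SQ_PSN | IBV_QP_TIMEOUT |
 * IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY | IBV_QP_MAX_QP_RD_ATOMIC.
 */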

/**
 * irdma_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_qp_host_ctx_info *ctx_info;
	struct irdma_tcp_offload_info *tcp_info;
	struct irdma_iwarp_offload_info *offload_info;
	struct irdma_modify_qp_info info = {};
	struct irdma_modify_qp_resp uresp = {};
	struct irdma_modify_qp_req ureq = {};
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	int err;
	unsigned long flags;

	if (udata) {
		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
			return -EINVAL;
	}

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	ctx_info = &iwqp->ctx_info;
	offload_info = &iwqp->iwarp_info;
	tcp_info = &iwqp->tcp_info;
	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
	ibdev_dbg(&iwdev->ibdev,
		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
		  iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
		  iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);

	spin_lock_irqsave(&iwqp->lock, flags);
	if (attr_mask & IB_QP_STATE) {
		info.curr_iwarp_state = iwqp->iwarp_state;
		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			if (iwdev->push_mode && udata &&
			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_alloc_push_page(iwqp);
				spin_lock_irqsave(&iwqp->lock, flags);
			}
			break;
		case IB_QPS_RTS:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
			    !iwqp->cm_id) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
			    iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}

			if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
				iwqp->ibqp_state = attr->qp_state;
				spin_unlock_irqrestore(&iwqp->lock, flags);
				if (udata && udata->inlen) {
					if (ib_copy_from_udata(&ureq, udata,
					    min(sizeof(ureq), udata->inlen)))
						return -EINVAL;

					irdma_flush_wqes(iwqp,
					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
					    IRDMA_REFLUSH);
				}
				return 0;
			}

			if (iwqp->sc_qp.term_flags) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_terminate_del_timer(&iwqp->sc_qp);
				spin_lock_irqsave(&iwqp->lock, flags);
			}
			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1769 			if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1770 			    iwdev->iw_status &&
1771 			    iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1772 				info.reset_tcp_conn = true;
1773 			else
1774 				dont_wait = 1;
1775 
1776 			issue_modify_qp = 1;
1777 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1778 			break;
1779 		default:
1780 			err = -EINVAL;
1781 			goto exit;
1782 		}
1783 
1784 		iwqp->ibqp_state = attr->qp_state;
1785 	}
1786 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1787 		ctx_info->iwarp_info_valid = true;
1788 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1789 			offload_info->wr_rdresp_en = true;
1790 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1791 			offload_info->wr_rdresp_en = true;
1792 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1793 			offload_info->rd_en = true;
1794 	}
1795 
1796 	if (ctx_info->iwarp_info_valid) {
1797 		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1798 		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1799 		irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1800 	}
1801 	spin_unlock_irqrestore(&iwqp->lock, flags);
1802 
1803 	if (attr_mask & IB_QP_STATE) {
1804 		if (issue_modify_qp) {
1805 			ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1806 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1807 				return -EINVAL;
1808 		}
1809 
1810 		spin_lock_irqsave(&iwqp->lock, flags);
1811 		if (iwqp->iwarp_state == info.curr_iwarp_state) {
1812 			iwqp->iwarp_state = info.next_iwarp_state;
1813 			iwqp->ibqp_state = attr->qp_state;
1814 		}
1815 		spin_unlock_irqrestore(&iwqp->lock, flags);
1816 	}
1817 
1818 	if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1819 		if (dont_wait) {
1820 			if (iwqp->hw_tcp_state) {
1821 				spin_lock_irqsave(&iwqp->lock, flags);
1822 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1823 				iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1824 				spin_unlock_irqrestore(&iwqp->lock, flags);
1825 			}
1826 			irdma_cm_disconn(iwqp);
1827 		} else {
1828 			int close_timer_started;
1829 
1830 			spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1831 
1832 			if (iwqp->cm_node) {
1833 				refcount_inc(&iwqp->cm_node->refcnt);
1834 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1835 				close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1836 				if (iwqp->cm_id && close_timer_started == 1)
1837 					irdma_schedule_cm_timer(iwqp->cm_node,
1838 						(struct irdma_puda_buf *)iwqp,
1839 						IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1840 
1841 				irdma_rem_ref_cm_node(iwqp->cm_node);
1842 			} else {
1843 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1844 			}
1845 		}
1846 	}
1847 	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1848 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1849 		struct irdma_ucontext *ucontext;
1850 
1851 		ucontext = rdma_udata_to_drv_context(udata,
1852 					struct irdma_ucontext, ibucontext);
1853 		if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1854 		    !iwqp->push_wqe_mmap_entry &&
1855 		    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1856 			&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1857 			uresp.push_valid = 1;
1858 			uresp.push_offset = iwqp->sc_qp.push_offset;
1859 		}
1860 
1861 		err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1862 				       udata->outlen));
1863 		if (err) {
1864 			irdma_remove_push_mmap_entries(iwqp);
1865 			ibdev_dbg(&iwdev->ibdev,
1866 				  "VERBS: copy_to_udata failed\n");
1867 			return err;
1868 		}
1869 	}
1870 
1871 	return 0;
1872 exit:
1873 	spin_unlock_irqrestore(&iwqp->lock, flags);
1874 
1875 	return err;
1876 }
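/*
 * Illustrative sketch (not driver code): a kernel ULP reaches the state
 * machine above through the core verbs API; e.g. forcing a QP into error
 * to flush outstanding work requests:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
 *
 * This lands in the IB_QPS_ERR/IB_QPS_RESET arm above: the QP moves to
 * IRDMA_QP_STATE_ERROR and the TCP connection is either reset or, when
 * there is nothing left to tear down (dont_wait), disconnected
 * immediately via irdma_cm_disconn().
 */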
1877 
1878 /**
1879  * irdma_srq_free_rsrc - free up resources for srq
1880  * @rf: RDMA PCI function
1881  * @iwsrq: srq ptr
1882  */
1883 static void irdma_srq_free_rsrc(struct irdma_pci_f *rf, struct irdma_srq *iwsrq)
1884 {
1885 	struct irdma_sc_srq *srq = &iwsrq->sc_srq;
1886 
1887 	if (!iwsrq->user_mode) {
1888 		dma_free_coherent(rf->sc_dev.hw->device, iwsrq->kmem.size,
1889 				  iwsrq->kmem.va, iwsrq->kmem.pa);
1890 		iwsrq->kmem.va = NULL;
1891 	}
1892 
1893 	irdma_free_rsrc(rf, rf->allocated_srqs, srq->srq_uk.srq_id);
1894 }
1895 
1896 /**
1897  * irdma_cq_free_rsrc - free up resources for cq
1898  * @rf: RDMA PCI function
1899  * @iwcq: cq ptr
1900  */
1901 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1902 {
1903 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1904 
1905 	if (!iwcq->user_mode) {
1906 		dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1907 				  iwcq->kmem.va, iwcq->kmem.pa);
1908 		iwcq->kmem.va = NULL;
1909 		dma_free_coherent(rf->sc_dev.hw->device,
1910 				  iwcq->kmem_shadow.size,
1911 				  iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1912 		iwcq->kmem_shadow.va = NULL;
1913 	}
1914 
1915 	irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1916 }
1917 
1918 /**
1919  * irdma_free_cqbuf - worker to free a cq buffer
1920  * @work: provides access to the cq buffer to free
1921  */
1922 static void irdma_free_cqbuf(struct work_struct *work)
1923 {
1924 	struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1925 
1926 	dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1927 			  cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1928 	cq_buf->kmem_buf.va = NULL;
1929 	kfree(cq_buf);
1930 }
1931 
1932 /**
1933  * irdma_process_resize_list - remove resized cq buffers from the resize_list
1934  * @iwcq: cq which owns the resize_list
1935  * @iwdev: irdma device
1936  * @lcqe_buf: the buffer where the last cqe is received
1937  */
1938 static int irdma_process_resize_list(struct irdma_cq *iwcq,
1939 				     struct irdma_device *iwdev,
1940 				     struct irdma_cq_buf *lcqe_buf)
1941 {
1942 	struct list_head *tmp_node, *list_node;
1943 	struct irdma_cq_buf *cq_buf;
1944 	int cnt = 0;
1945 
1946 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1947 		cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1948 		if (cq_buf == lcqe_buf)
1949 			return cnt;
1950 
1951 		list_del(&cq_buf->list);
1952 		queue_work(iwdev->cleanup_wq, &cq_buf->work);
1953 		cnt++;
1954 	}
1955 
1956 	return cnt;
1957 }
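/*
 * Note: the walk stops at lcqe_buf (when non-NULL) because that retired
 * ring still holds the last CQE to be polled; every older buffer ahead
 * of it is handed to cleanup_wq and freed asynchronously by
 * irdma_free_cqbuf() above.
 */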
1958 
1959 /**
1960  * irdma_destroy_srq - destroy srq
1961  * @ibsrq: srq pointer
1962  * @udata: user data
1963  */
1964 static int irdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1965 {
1966 	struct irdma_device *iwdev = to_iwdev(ibsrq->device);
1967 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
1968 	struct irdma_sc_srq *srq = &iwsrq->sc_srq;
1969 
1970 	irdma_srq_wq_destroy(iwdev->rf, srq);
1971 	irdma_srq_free_rsrc(iwdev->rf, iwsrq);
1972 	return 0;
1973 }
1974 
1975 /**
1976  * irdma_destroy_cq - destroy cq
1977  * @ib_cq: cq pointer
1978  * @udata: user data
1979  */
1980 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1981 {
1982 	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1983 	struct irdma_cq *iwcq = to_iwcq(ib_cq);
1984 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1985 	struct irdma_sc_dev *dev = cq->dev;
1986 	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1987 	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1988 	unsigned long flags;
1989 
1990 	spin_lock_irqsave(&iwcq->lock, flags);
1991 	if (!list_empty(&iwcq->cmpl_generated))
1992 		irdma_remove_cmpls_list(iwcq);
1993 	if (!list_empty(&iwcq->resize_list))
1994 		irdma_process_resize_list(iwcq, iwdev, NULL);
1995 	spin_unlock_irqrestore(&iwcq->lock, flags);
1996 
1997 	irdma_cq_rem_ref(ib_cq);
1998 	wait_for_completion(&iwcq->free_cq);
1999 
2000 	irdma_cq_wq_destroy(iwdev->rf, cq);
2001 
2002 	spin_lock_irqsave(&iwceq->ce_lock, flags);
2003 	irdma_sc_cleanup_ceqes(cq, ceq);
2004 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
2005 	irdma_cq_free_rsrc(iwdev->rf, iwcq);
2006 
2007 	return 0;
2008 }
2009 
2010 /**
2011  * irdma_resize_cq - resize cq
2012  * @ibcq: cq to be resized
2013  * @entries: desired cq size
2014  * @udata: user data
2015  */
2016 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
2017 			   struct ib_udata *udata)
2018 {
2019 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
2020 	struct irdma_cq *iwcq = to_iwcq(ibcq);
2021 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
2022 	struct irdma_cqp_request *cqp_request;
2023 	struct cqp_cmds_info *cqp_info;
2024 	struct irdma_modify_cq_info *m_info;
2025 	struct irdma_modify_cq_info info = {};
2026 	struct irdma_dma_mem kmem_buf;
2027 	struct irdma_cq_mr *cqmr_buf;
2028 	struct irdma_pbl *iwpbl_buf;
2029 	struct irdma_device *iwdev;
2030 	struct irdma_pci_f *rf;
2031 	struct irdma_cq_buf *cq_buf = NULL;
2032 	unsigned long flags;
2033 	u8 cqe_size;
2034 	int ret;
2035 
2036 	iwdev = to_iwdev(ibcq->device);
2037 	rf = iwdev->rf;
2038 
2039 	if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2040 	    IRDMA_FEATURE_CQ_RESIZE))
2041 		return -EOPNOTSUPP;
2042 
2043 	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
2044 		return -EINVAL;
2045 
2046 	if (entries > rf->max_cqe)
2047 		return -EINVAL;
2048 
2049 	if (!iwcq->user_mode) {
2050 		entries += 2;
2051 
2052 		if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
2053 		    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2054 			entries *= 2;
2055 
2056 		if (entries & 1)
2057 			entries += 1; /* cq size must be an even number */
2058 
2059 		cqe_size = iwcq->sc_cq.cq_uk.avoid_mem_cflct ? 64 : 32;
2060 		if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
2061 			entries += 2;
2062 	}
2063 
2064 	info.cq_size = max(entries, 4);
2065 
2066 	if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
2067 		return 0;
2068 
2069 	if (udata) {
2070 		struct irdma_resize_cq_req req = {};
2071 		struct irdma_ucontext *ucontext =
2072 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2073 						  ibucontext);
2074 
2075 		/* CQ resize not supported with legacy GEN_1 libi40iw */
2076 		if (ucontext->legacy_mode)
2077 			return -EOPNOTSUPP;
2078 
2079 		if (ib_copy_from_udata(&req, udata,
2080 				       min(sizeof(req), udata->inlen)))
2081 			return -EINVAL;
2082 
2083 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2084 		iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
2085 					  &ucontext->cq_reg_mem_list);
2086 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2087 
2088 		if (!iwpbl_buf)
2089 			return -ENOMEM;
2090 
2091 		cqmr_buf = &iwpbl_buf->cq_mr;
2092 		if (iwpbl_buf->pbl_allocated) {
2093 			info.virtual_map = true;
2094 			info.pbl_chunk_size = 1;
2095 			info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
2096 		} else {
2097 			info.cq_pa = cqmr_buf->cq_pbl.addr;
2098 		}
2099 	} else {
2100 		/* Kmode CQ resize */
2101 		int rsize;
2102 
2103 		rsize = info.cq_size * sizeof(struct irdma_cqe);
2104 		kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
2105 		kmem_buf.va = dma_alloc_coherent(dev->hw->device,
2106 						 kmem_buf.size, &kmem_buf.pa,
2107 						 GFP_KERNEL);
2108 		if (!kmem_buf.va)
2109 			return -ENOMEM;
2110 
2111 		info.cq_base = kmem_buf.va;
2112 		info.cq_pa = kmem_buf.pa;
2113 		cq_buf = kzalloc_obj(*cq_buf);
2114 		if (!cq_buf) {
2115 			ret = -ENOMEM;
2116 			goto error;
2117 		}
2118 	}
2119 
2120 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2121 	if (!cqp_request) {
2122 		ret = -ENOMEM;
2123 		goto error;
2124 	}
2125 
2126 	info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
2127 	info.cq_resize = true;
2128 
2129 	cqp_info = &cqp_request->info;
2130 	m_info = &cqp_info->in.u.cq_modify.info;
2131 	memcpy(m_info, &info, sizeof(*m_info));
2132 
2133 	cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
2134 	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
2135 	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
2136 	cqp_info->post_sq = 1;
2137 	ret = irdma_handle_cqp_op(rf, cqp_request);
2138 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2139 	if (ret)
2140 		goto error;
2141 
2142 	spin_lock_irqsave(&iwcq->lock, flags);
2143 	if (cq_buf) {
2144 		cq_buf->kmem_buf = iwcq->kmem;
2145 		cq_buf->hw = dev->hw;
2146 		memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
2147 		INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
2148 		list_add_tail(&cq_buf->list, &iwcq->resize_list);
2149 		iwcq->kmem = kmem_buf;
2150 	}
2151 
2152 	irdma_sc_cq_resize(&iwcq->sc_cq, &info);
2153 	ibcq->cqe = info.cq_size - 1;
2154 	spin_unlock_irqrestore(&iwcq->lock, flags);
2155 
2156 	return 0;
2157 error:
2158 	if (!udata) {
2159 		dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
2160 				  kmem_buf.pa);
2161 		kmem_buf.va = NULL;
2162 	}
2163 	kfree(cq_buf);
2164 
2165 	return ret;
2166 }
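/*
 * Illustrative sketch: the kernel-mode sizing rules above, replayed as
 * plain arithmetic. Assumes GEN_2 hardware, 32-byte CQEs
 * (avoid_mem_cflct off) and a 4K IRDMA_HW_PAGE_SIZE:
 *
 *	entries = 62;
 *	entries += 2;			// 64: driver-reserved slots
 *	entries *= 2;			// 128: GEN_2 doubling for 32B CQEs
 *	if (entries & 1)
 *		entries += 1;		// ring must hold an even CQE count
 *	if (entries * 32 == 4096)	// ring is exactly one HW page
 *		entries += 2;		// 130: avoid the page-sized ring
 *	// info.cq_size = max(entries, 4) == 130
 */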
2167 
2168 /**
2169  * irdma_srq_event - event notification for srq limit
2170  * @srq: shared srq struct
2171  */
2172 void irdma_srq_event(struct irdma_sc_srq *srq)
2173 {
2174 	struct irdma_srq *iwsrq = container_of(srq, struct irdma_srq, sc_srq);
2175 	struct ib_srq *ibsrq = &iwsrq->ibsrq;
2176 	struct ib_event event;
2177 
2178 	srq->srq_limit = 0;
2179 
2180 	if (!ibsrq->event_handler)
2181 		return;
2182 
2183 	event.device = ibsrq->device;
2184 	event.element.port_num = 1;
2185 	event.element.srq = ibsrq;
2186 	event.event = IB_EVENT_SRQ_LIMIT_REACHED;
2187 	ibsrq->event_handler(&event, ibsrq->srq_context);
2188 }
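/*
 * Illustrative sketch (not driver code): consumers receive the limit
 * event through the handler registered at SRQ creation time:
 *
 *	static void my_srq_event(struct ib_event *ev, void *ctx)
 *	{
 *		if (ev->event == IB_EVENT_SRQ_LIMIT_REACHED)
 *			schedule_work(ctx);	// e.g. replenish receive WRs
 *	}
 *
 * Note the limit disarms (srq_limit = 0) before the upcall, so a handler
 * that wants another event must re-arm it via ib_modify_srq().
 */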
2189 
2190 /**
2191  * irdma_modify_srq - modify srq request
2192  * @ibsrq: srq's pointer for modify
2193  * @attr: srq attributes to modify
2194  * @attr_mask: mask of attributes to modify
2195  * @udata: user data
2196  */
2197 static int irdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
2198 			    enum ib_srq_attr_mask attr_mask,
2199 			    struct ib_udata *udata)
2200 {
2201 	struct irdma_device *iwdev = to_iwdev(ibsrq->device);
2202 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
2203 	struct irdma_cqp_request *cqp_request;
2204 	struct irdma_pci_f *rf = iwdev->rf;
2205 	struct irdma_modify_srq_info *info;
2206 	struct cqp_cmds_info *cqp_info;
2207 	int status;
2208 
2209 	if (attr_mask & IB_SRQ_MAX_WR)
2210 		return -EINVAL;
2211 
2212 	if (!(attr_mask & IB_SRQ_LIMIT))
2213 		return 0;
2214 
2215 	if (attr->srq_limit > iwsrq->sc_srq.srq_uk.srq_size)
2216 		return -EINVAL;
2217 
2218 	/* Execute this cqp op synchronously, so we can update srq_limit
2219 	 * upon successful completion.
2220 	 */
2221 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2222 	if (!cqp_request)
2223 		return -ENOMEM;
2224 
2225 	cqp_info = &cqp_request->info;
2226 	info = &cqp_info->in.u.srq_modify.info;
2227 	info->srq_limit = attr->srq_limit;
2228 	if (info->srq_limit > 0xFFF)
2229 		info->srq_limit = 0xFFF;
2230 	info->arm_limit_event = 1;
2231 
2232 	cqp_info->cqp_cmd = IRDMA_OP_SRQ_MODIFY;
2233 	cqp_info->post_sq = 1;
2234 	cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq;
2235 	cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request;
2236 	status = irdma_handle_cqp_op(rf, cqp_request);
2237 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2238 	if (status)
2239 		return status;
2240 
2241 	iwsrq->sc_srq.srq_limit = info->srq_limit;
2242 
2243 	return 0;
2244 }
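/*
 * Illustrative usage sketch: arming the limit from a ULP. Values above
 * 0xFFF are silently clamped by the function above, and IB_SRQ_MAX_WR
 * (SRQ resize) is rejected outright:
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *	ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */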
2245 
2246 static int irdma_setup_umode_srq(struct irdma_device *iwdev,
2247 				 struct irdma_srq *iwsrq,
2248 				 struct irdma_srq_init_info *info,
2249 				 struct ib_udata *udata)
2250 {
2251 #define IRDMA_CREATE_SRQ_MIN_REQ_LEN \
2252 	offsetofend(struct irdma_create_srq_req, user_shadow_area)
2253 	struct irdma_create_srq_req req = {};
2254 	struct irdma_ucontext *ucontext;
2255 	struct irdma_srq_mr *srqmr;
2256 	struct irdma_pbl *iwpbl;
2257 	unsigned long flags;
2258 
2259 	iwsrq->user_mode = true;
2260 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2261 					     ibucontext);
2262 
2263 	if (udata->inlen < IRDMA_CREATE_SRQ_MIN_REQ_LEN)
2264 		return -EINVAL;
2265 
2266 	if (ib_copy_from_udata(&req, udata,
2267 			       min(sizeof(req), udata->inlen)))
2268 		return -EFAULT;
2269 
2270 	spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
2271 	iwpbl = irdma_get_pbl((unsigned long)req.user_srq_buf,
2272 			      &ucontext->srq_reg_mem_list);
2273 	spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
2274 	if (!iwpbl)
2275 		return -EPROTO;
2276 
2277 	iwsrq->iwpbl = iwpbl;
2278 	srqmr = &iwpbl->srq_mr;
2279 
2280 	if (iwpbl->pbl_allocated) {
2281 		info->virtual_map = true;
2282 		info->pbl_chunk_size = 1;
2283 		info->first_pm_pbl_idx = srqmr->srq_pbl.idx;
2284 		info->leaf_pbl_size = 1;
2285 	} else {
2286 		info->srq_pa = srqmr->srq_pbl.addr;
2287 	}
2288 	info->shadow_area_pa = srqmr->shadow;
2289 
2290 	return 0;
2291 }
2292 
2293 static int irdma_setup_kmode_srq(struct irdma_device *iwdev,
2294 				 struct irdma_srq *iwsrq,
2295 				 struct irdma_srq_init_info *info, u32 depth,
2296 				 u8 shift)
2297 {
2298 	struct irdma_srq_uk_init_info *ukinfo = &info->srq_uk_init_info;
2299 	struct irdma_dma_mem *mem = &iwsrq->kmem;
2300 	u32 size, ring_size;
2301 
2302 	ring_size = depth * IRDMA_QP_WQE_MIN_SIZE;
2303 	size = ring_size + (IRDMA_SHADOW_AREA_SIZE << 3);
2304 
2305 	mem->size = ALIGN(size, 256);
2306 	mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
2307 				     &mem->pa, GFP_KERNEL);
2308 	if (!mem->va)
2309 		return -ENOMEM;
2310 
2311 	ukinfo->srq = mem->va;
2312 	ukinfo->srq_size = depth >> shift;
2313 	ukinfo->shadow_area = mem->va + ring_size;
2314 
2315 	info->srq_pa = mem->pa;
2316 	info->shadow_area_pa = info->srq_pa + ring_size;
2317 
2318 	return 0;
2319 }
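/*
 * Note: one coherent allocation backs both the WQE ring and the shadow
 * area; the split is pure pointer arithmetic:
 *
 *	va:	[ ring: depth * IRDMA_QP_WQE_MIN_SIZE bytes | shadow ]
 *		ukinfo->srq = mem->va
 *		ukinfo->shadow_area = mem->va + ring_size
 *	pa:	info->srq_pa = mem->pa
 *		info->shadow_area_pa = mem->pa + ring_size
 */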
2320 
2321 /**
2322  * irdma_create_srq - create srq
2323  * @ibsrq: ib's srq pointer
2324  * @initattrs: attributes for srq
2325  * @udata: user data for create srq
2326  */
2327 static int irdma_create_srq(struct ib_srq *ibsrq,
2328 			    struct ib_srq_init_attr *initattrs,
2329 			    struct ib_udata *udata)
2330 {
2331 	struct irdma_device *iwdev = to_iwdev(ibsrq->device);
2332 	struct ib_srq_attr *attr = &initattrs->attr;
2333 	struct irdma_pd *iwpd = to_iwpd(ibsrq->pd);
2334 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
2335 	struct irdma_srq_uk_init_info *ukinfo;
2336 	struct irdma_cqp_request *cqp_request;
2337 	struct irdma_srq_init_info info = {};
2338 	struct irdma_pci_f *rf = iwdev->rf;
2339 	struct irdma_uk_attrs *uk_attrs;
2340 	struct cqp_cmds_info *cqp_info;
2341 	int err_code = 0;
2342 	u32 depth;
2343 	u8 shift;
2344 
2345 	uk_attrs = &rf->sc_dev.hw_attrs.uk_attrs;
2346 	ukinfo = &info.srq_uk_init_info;
2347 
2348 	if (initattrs->srq_type != IB_SRQT_BASIC)
2349 		return -EOPNOTSUPP;
2350 
2351 	if (!(uk_attrs->feature_flags & IRDMA_FEATURE_SRQ) ||
2352 	    attr->max_sge > uk_attrs->max_hw_wq_frags)
2353 		return -EINVAL;
2354 
2355 	refcount_set(&iwsrq->refcnt, 1);
2356 	spin_lock_init(&iwsrq->lock);
2357 	err_code = irdma_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq,
2358 				    &iwsrq->srq_num, &rf->next_srq);
2359 	if (err_code)
2360 		return err_code;
2361 
2362 	ukinfo->max_srq_frag_cnt = attr->max_sge;
2363 	ukinfo->uk_attrs = uk_attrs;
2364 	ukinfo->srq_id = iwsrq->srq_num;
2365 
2366 	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_srq_frag_cnt, 0,
2367 			    &shift);
2368 
2369 	err_code = irdma_get_srqdepth(ukinfo->uk_attrs, attr->max_wr,
2370 				      shift, &depth);
2371 	if (err_code)
2372 		return err_code;
2373 
2374 	/* Actual SRQ size in WRs for ring and HW */
2375 	ukinfo->srq_size = depth >> shift;
2376 
2377 	/* Max postable WRs to SRQ */
2378 	iwsrq->max_wr = (depth - IRDMA_RQ_RSVD) >> shift;
2379 	attr->max_wr = iwsrq->max_wr;
2380 
2381 	if (udata)
2382 		err_code = irdma_setup_umode_srq(iwdev, iwsrq, &info, udata);
2383 	else
2384 		err_code = irdma_setup_kmode_srq(iwdev, iwsrq, &info, depth,
2385 						 shift);
2386 
2387 	if (err_code)
2388 		goto free_rsrc;
2389 
2390 	info.vsi = &iwdev->vsi;
2391 	info.pd = &iwpd->sc_pd;
2392 
2393 	iwsrq->sc_srq.srq_uk.lock = &iwsrq->lock;
2394 	err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info);
2395 	if (err_code)
2396 		goto free_dmem;
2397 
2398 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2399 	if (!cqp_request) {
2400 		err_code = -ENOMEM;
2401 		goto free_dmem;
2402 	}
2403 
2404 	cqp_info = &cqp_request->info;
2405 	cqp_info->cqp_cmd = IRDMA_OP_SRQ_CREATE;
2406 	cqp_info->post_sq = 1;
2407 	cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq;
2408 	cqp_info->in.u.srq_create.scratch = (uintptr_t)cqp_request;
2409 	err_code = irdma_handle_cqp_op(rf, cqp_request);
2410 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2411 	if (err_code)
2412 		goto free_dmem;
2413 
2414 	if (udata) {
2415 		struct irdma_create_srq_resp resp = {};
2416 
2417 		resp.srq_id = iwsrq->srq_num;
2418 		resp.srq_size = ukinfo->srq_size;
2419 		if (ib_copy_to_udata(udata, &resp,
2420 				     min(sizeof(resp), udata->outlen))) {
2421 			err_code = -EPROTO;
2422 			goto srq_destroy;
2423 		}
2424 	}
2425 
2426 	return 0;
2427 
2428 srq_destroy:
2429 	irdma_srq_wq_destroy(rf, &iwsrq->sc_srq);
2430 
2431 free_dmem:
2432 	if (!iwsrq->user_mode)
2433 		dma_free_coherent(rf->hw.device, iwsrq->kmem.size,
2434 				  iwsrq->kmem.va, iwsrq->kmem.pa);
2435 free_rsrc:
2436 	irdma_free_rsrc(rf, rf->allocated_srqs, iwsrq->srq_num);
2437 	return err_code;
2438 }
2439 
2440 /**
2441  * irdma_query_srq - get SRQ attributes
2442  * @ibsrq: the SRQ to query
2443  * @attr: the attributes of the SRQ
2444  */
2445 static int irdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
2446 {
2447 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
2448 
2449 	attr->max_wr = iwsrq->max_wr;
2450 	attr->max_sge = iwsrq->sc_srq.srq_uk.max_srq_frag_cnt;
2451 	attr->srq_limit = iwsrq->sc_srq.srq_limit;
2452 
2453 	return 0;
2454 }
2455 
2456 static inline int cq_validate_flags(u32 flags, u8 hw_rev)
2457 {
2458 	/* GEN1/2 does not support CQ create flags */
2459 	if (hw_rev <= IRDMA_GEN_2)
2460 		return flags ? -EOPNOTSUPP : 0;
2461 
2462 	return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
2463 }
2464 
2465 /**
2466  * irdma_create_cq - create cq
2467  * @ibcq: CQ allocated
2468  * @attr: attributes for cq
2469  * @attrs: uverbs attribute bundle
2470  */
2471 static int irdma_create_cq(struct ib_cq *ibcq,
2472 			   const struct ib_cq_init_attr *attr,
2473 			   struct uverbs_attr_bundle *attrs)
2474 {
2475 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
2476 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
2477 	struct ib_udata *udata = &attrs->driver_udata;
2478 	struct ib_device *ibdev = ibcq->device;
2479 	struct irdma_device *iwdev = to_iwdev(ibdev);
2480 	struct irdma_pci_f *rf = iwdev->rf;
2481 	struct irdma_cq *iwcq = to_iwcq(ibcq);
2482 	u32 cq_num = 0;
2483 	struct irdma_sc_cq *cq;
2484 	struct irdma_sc_dev *dev = &rf->sc_dev;
2485 	struct irdma_cq_init_info info = {};
2486 	struct irdma_cqp_request *cqp_request;
2487 	struct cqp_cmds_info *cqp_info;
2488 	struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
2489 	unsigned long flags;
2490 	int err_code;
2491 	int entries = attr->cqe;
2492 	bool cqe_64byte_ena;
2493 	u8 cqe_size;
2494 
2495 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
2496 	if (err_code)
2497 		return err_code;
2498 
2499 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
2500 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
2501 		return -EINVAL;
2502 
2503 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
2504 				    &rf->next_cq);
2505 	if (err_code)
2506 		return err_code;
2507 
2508 	cq = &iwcq->sc_cq;
2509 	cq->back_cq = iwcq;
2510 	refcount_set(&iwcq->refcnt, 1);
2511 	spin_lock_init(&iwcq->lock);
2512 	INIT_LIST_HEAD(&iwcq->resize_list);
2513 	INIT_LIST_HEAD(&iwcq->cmpl_generated);
2514 	iwcq->cq_num = cq_num;
2515 	info.dev = dev;
2516 	ukinfo->cq_size = max(entries, 4);
2517 	ukinfo->cq_id = cq_num;
2518 	cqe_64byte_ena = !!(dev->hw_attrs.uk_attrs.feature_flags &
2519 			    IRDMA_FEATURE_64_BYTE_CQE);
2520 	cqe_size = cqe_64byte_ena ? 64 : 32;
2521 	ukinfo->avoid_mem_cflct = cqe_64byte_ena;
2522 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
2523 	if (attr->comp_vector < rf->ceqs_count)
2524 		info.ceq_id = attr->comp_vector;
2525 	info.ceq_id_valid = true;
2526 	info.ceqe_mask = 1;
2527 	info.type = IRDMA_CQ_TYPE_IWARP;
2528 	info.vsi = &iwdev->vsi;
2529 
2530 	if (udata) {
2531 		struct irdma_ucontext *ucontext;
2532 		struct irdma_create_cq_req req = {};
2533 		struct irdma_cq_mr *cqmr;
2534 		struct irdma_pbl *iwpbl;
2535 		struct irdma_pbl *iwpbl_shadow;
2536 		struct irdma_cq_mr *cqmr_shadow;
2537 
2538 		iwcq->user_mode = true;
2539 		ucontext =
2540 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2541 						  ibucontext);
2542 		if (ib_copy_from_udata(&req, udata,
2543 				       min(sizeof(req), udata->inlen))) {
2544 			err_code = -EFAULT;
2545 			goto cq_free_rsrc;
2546 		}
2547 
2548 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2549 		iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
2550 				      &ucontext->cq_reg_mem_list);
2551 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2552 		if (!iwpbl) {
2553 			err_code = -EPROTO;
2554 			goto cq_free_rsrc;
2555 		}
2556 
2557 		cqmr = &iwpbl->cq_mr;
2558 
2559 		if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2560 		    IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2561 			spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2562 			iwpbl_shadow = irdma_get_pbl(
2563 					(unsigned long)req.user_shadow_area,
2564 					&ucontext->cq_reg_mem_list);
2565 			spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2566 
2567 			if (!iwpbl_shadow) {
2568 				err_code = -EPROTO;
2569 				goto cq_free_rsrc;
2570 			}
2571 			cqmr_shadow = &iwpbl_shadow->cq_mr;
2572 			info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2573 			cqmr->split = true;
2574 		} else {
2575 			info.shadow_area_pa = cqmr->shadow;
2576 		}
2577 		if (iwpbl->pbl_allocated) {
2578 			info.virtual_map = true;
2579 			info.pbl_chunk_size = 1;
2580 			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2581 		} else {
2582 			info.cq_base_pa = cqmr->cq_pbl.addr;
2583 		}
2584 	} else {
2585 		/* Kmode allocations */
2586 		int rsize;
2587 
2588 		if (entries < 1 || entries > rf->max_cqe) {
2589 			err_code = -EINVAL;
2590 			goto cq_free_rsrc;
2591 		}
2592 
2593 		entries += 2;
2594 		if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2595 			entries *= 2;
2596 
2597 		if (entries & 1)
2598 			entries += 1; /* cq size must be an even number */
2599 
2600 		if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
2601 			entries += 2;
2602 
2603 		ukinfo->cq_size = entries;
2604 
2605 		if (cqe_64byte_ena)
2606 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
2607 		else
2608 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2609 		iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2610 		iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2611 						   iwcq->kmem.size,
2612 						   &iwcq->kmem.pa, GFP_KERNEL);
2613 		if (!iwcq->kmem.va) {
2614 			err_code = -ENOMEM;
2615 			goto cq_free_rsrc;
2616 		}
2617 
2618 		iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2619 					       64);
2620 		iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2621 							  iwcq->kmem_shadow.size,
2622 							  &iwcq->kmem_shadow.pa,
2623 							  GFP_KERNEL);
2624 		if (!iwcq->kmem_shadow.va) {
2625 			err_code = -ENOMEM;
2626 			goto cq_free_rsrc;
2627 		}
2628 		info.shadow_area_pa = iwcq->kmem_shadow.pa;
2629 		ukinfo->shadow_area = iwcq->kmem_shadow.va;
2630 		ukinfo->cq_base = iwcq->kmem.va;
2631 		info.cq_base_pa = iwcq->kmem.pa;
2632 	}
2633 
2634 	info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2635 					 (u32)IRDMA_MAX_CQ_READ_THRESH);
2636 
2637 	if (irdma_sc_cq_init(cq, &info)) {
2638 		ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2639 		err_code = -EPROTO;
2640 		goto cq_free_rsrc;
2641 	}
2642 
2643 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2644 	if (!cqp_request) {
2645 		err_code = -ENOMEM;
2646 		goto cq_free_rsrc;
2647 	}
2648 
2649 	cqp_info = &cqp_request->info;
2650 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2651 	cqp_info->post_sq = 1;
2652 	cqp_info->in.u.cq_create.cq = cq;
2653 	cqp_info->in.u.cq_create.check_overflow = true;
2654 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2655 	err_code = irdma_handle_cqp_op(rf, cqp_request);
2656 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2657 	if (err_code)
2658 		goto cq_free_rsrc;
2659 
2660 	if (udata) {
2661 		struct irdma_create_cq_resp resp = {};
2662 
2663 		resp.cq_id = info.cq_uk_init_info.cq_id;
2664 		resp.cq_size = info.cq_uk_init_info.cq_size;
2665 		if (ib_copy_to_udata(udata, &resp,
2666 				     min(sizeof(resp), udata->outlen))) {
2667 			ibdev_dbg(&iwdev->ibdev,
2668 				  "VERBS: copy to user data\n");
2669 			err_code = -EPROTO;
2670 			goto cq_destroy;
2671 		}
2672 	}
2673 
2674 	init_completion(&iwcq->free_cq);
2675 
2676 	/* Populate table entry after CQ is fully created. */
2677 	smp_store_release(&rf->cq_table[cq_num], iwcq);
2678 
2679 	return 0;
2680 cq_destroy:
2681 	irdma_cq_wq_destroy(rf, cq);
2682 cq_free_rsrc:
2683 	irdma_cq_free_rsrc(rf, iwcq);
2684 
2685 	return err_code;
2686 }
2687 
2688 /**
2689  * irdma_get_mr_access - get hw MR access permissions from IB access flags
2690  * @access: IB access flags
2691  * @hw_rev: Hardware version
2692  */
2693 static inline u16 irdma_get_mr_access(int access, u8 hw_rev)
2694 {
2695 	u16 hw_access = 0;
2696 
2697 	hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2698 		     IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2699 	hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2700 		     IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2701 	hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2702 		     IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2703 	if (hw_rev >= IRDMA_GEN_3) {
2704 		hw_access |= (access & IB_ACCESS_MW_BIND) ?
2705 			     IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2706 	}
2707 	hw_access |= (access & IB_ZERO_BASED) ?
2708 		     IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
2709 	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
2710 
2711 	return hw_access;
2712 }
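/*
 * Illustrative sketch: e.g. on GEN_3 hardware,
 *
 *	irdma_get_mr_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *			    IRDMA_GEN_3)
 *
 * yields IRDMA_ACCESS_FLAGS_LOCALWRITE | IRDMA_ACCESS_FLAGS_REMOTEREAD |
 * IRDMA_ACCESS_FLAGS_LOCALREAD; local read is always granted, and
 * IB_ACCESS_MW_BIND is honored only from GEN_3 onward.
 */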
2713 
2714 /**
2715  * irdma_free_stag - free stag resource
2716  * @iwdev: irdma device
2717  * @stag: stag to free
2718  */
2719 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2720 {
2721 	u32 stag_idx;
2722 
2723 	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2724 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2725 }
2726 
2727 /**
2728  * irdma_create_stag - create random stag
2729  * @iwdev: irdma device
2730  */
2731 static u32 irdma_create_stag(struct irdma_device *iwdev)
2732 {
2733 	u32 stag = 0;
2734 	u32 stag_index = 0;
2735 	u32 next_stag_index;
2736 	u32 driver_key;
2737 	u32 random;
2738 	u8 consumer_key;
2739 	int ret;
2740 
2741 	get_random_bytes(&random, sizeof(random));
2742 	consumer_key = (u8)random;
2743 
2744 	driver_key = random & ~iwdev->rf->mr_stagmask;
2745 	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2746 	next_stag_index %= iwdev->rf->max_mr;
2747 
2748 	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2749 			       iwdev->rf->max_mr, &stag_index,
2750 			       &next_stag_index);
2751 	if (ret)
2752 		return stag;
2753 	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2754 	stag |= driver_key;
2755 	stag += (u32)consumer_key;
2756 
2757 	return stag;
2758 }
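/*
 * Note: the returned stag packs three fields, assuming the 8-bit index
 * shift suggested by the ">> 8" above:
 *
 *	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;	// resource index
 *	stag |= driver_key;	// random bits outside mr_stagmask
 *	stag += (u8)random;	// low byte: consumer key
 *
 * A return of 0 means index allocation failed; callers treat it as
 * -ENOMEM.
 */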
2759 
2760 /**
2761  * irdma_next_pbl_addr - Get next pbl address
2762  * @pbl: pointer to a pble
2763  * @pinfo: info pointer
2764  * @idx: index
2765  */
2766 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2767 				       u32 *idx)
2768 {
2769 	*idx += 1;
2770 	if (!(*pinfo) || *idx != (*pinfo)->cnt)
2771 		return ++pbl;
2772 	*idx = 0;
2773 	(*pinfo)++;
2774 
2775 	return (*pinfo)->addr;
2776 }
2777 
2778 /**
2779  * irdma_copy_user_pgaddrs - copy user page address to pble's os locally
2780  * @iwmr: iwmr for IB's user page addresses
2781  * @pbl: ple pointer to save 1 level or 0 level pble
2782  * @level: indicated level 0, 1 or 2
2783  */
2784 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2785 				    enum irdma_pble_level level)
2786 {
2787 	struct ib_umem *region = iwmr->region;
2788 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2789 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2790 	struct irdma_pble_info *pinfo;
2791 	struct ib_block_iter biter;
2792 	u32 idx = 0;
2793 	u32 pbl_cnt = 0;
2794 
2795 	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2796 
2797 	if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2798 		iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2799 
2800 	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2801 		*pbl = rdma_block_iter_dma_address(&biter);
2802 		if (++pbl_cnt == palloc->total_cnt)
2803 			break;
2804 		pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
2805 	}
2806 }
2807 
2808 /**
2809  * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
2810  * @arr: lvl1 pbl array
2811  * @npages: page count
2812  * @pg_size: page size
2813  *
2814  */
2815 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2816 {
2817 	u32 pg_idx;
2818 
2819 	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2820 		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
2821 			return false;
2822 	}
2823 
2824 	return true;
2825 }
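/*
 * Illustrative sketch: with pg_size == 4096, arr = {0x10000, 0x11000,
 * 0x12000} passes (arr[pg_idx] == arr[0] + pg_idx * pg_size for every
 * index), while arr = {0x10000, 0x11000, 0x13000} fails at pg_idx == 2.
 */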
2826 
2827 /**
2828  * irdma_check_mr_contiguous - check if MR is physically contiguous
2829  * @palloc: pbl allocation struct
2830  * @pg_size: page size
2831  */
2832 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2833 				      u32 pg_size)
2834 {
2835 	struct irdma_pble_level2 *lvl2 = &palloc->level2;
2836 	struct irdma_pble_info *leaf = lvl2->leaf;
2837 	u64 *arr = NULL;
2838 	u64 *start_addr = NULL;
2839 	int i;
2840 	bool ret;
2841 
2842 	if (palloc->level == PBLE_LEVEL_1) {
2843 		arr = palloc->level1.addr;
2844 		ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2845 						 pg_size);
2846 		return ret;
2847 	}
2848 
2849 	start_addr = leaf->addr;
2850 
2851 	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2852 		arr = leaf->addr;
2853 		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2854 			return false;
2855 		ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2856 		if (!ret)
2857 			return false;
2858 	}
2859 
2860 	return true;
2861 }
2862 
2863 /**
2864  * irdma_setup_pbles - copy user pg address to pble's
2865  * @rf: RDMA PCI function
2866  * @iwmr: mr pointer for this memory registration
2867  * @lvl: requested pble levels
2868  */
2869 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2870 			     u8 lvl)
2871 {
2872 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2873 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2874 	struct irdma_pble_info *pinfo;
2875 	u64 *pbl;
2876 	int status;
2877 	enum irdma_pble_level level = PBLE_LEVEL_1;
2878 
2879 	if (lvl) {
2880 		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2881 					lvl);
2882 		if (status)
2883 			return status;
2884 
2885 		iwpbl->pbl_allocated = true;
2886 		level = palloc->level;
2887 		pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2888 						  palloc->level2.leaf;
2889 		pbl = pinfo->addr;
2890 	} else {
2891 		pbl = iwmr->pgaddrmem;
2892 	}
2893 
2894 	irdma_copy_user_pgaddrs(iwmr, pbl, level);
2895 
2896 	if (lvl)
2897 		iwmr->pgaddrmem[0] = *pbl;
2898 
2899 	return 0;
2900 }
2901 
2902 /**
2903  * irdma_handle_q_mem - handle memory for qp, srq and cq
2904  * @iwdev: irdma device
2905  * @req: information for q memory management
2906  * @iwpbl: pble struct
2907  * @lvl: pble level mask
2908  */
2909 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2910 			      struct irdma_mem_reg_req *req,
2911 			      struct irdma_pbl *iwpbl, u8 lvl)
2912 {
2913 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2914 	struct irdma_mr *iwmr = iwpbl->iwmr;
2915 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2916 	struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2917 	struct irdma_srq_mr *srqmr = &iwpbl->srq_mr;
2918 	struct irdma_hmc_pble *hmc_p;
2919 	u64 *arr = iwmr->pgaddrmem;
2920 	u32 pg_size, total;
2921 	int err = 0;
2922 	bool ret = true;
2923 
2924 	pg_size = iwmr->page_size;
2925 	err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2926 	if (err)
2927 		return err;
2928 
2929 	if (lvl)
2930 		arr = palloc->level1.addr;
2931 
2932 	switch (iwmr->type) {
2933 	case IRDMA_MEMREG_TYPE_QP:
2934 		total = req->sq_pages + req->rq_pages;
2935 		hmc_p = &qpmr->sq_pbl;
2936 		qpmr->shadow = (dma_addr_t)arr[total];
2937 		/* Need to use physical address for RQ of QP
2938 		 * in case it is associated with SRQ.
2939 		 */
2940 		qpmr->rq_pa = (dma_addr_t)arr[req->sq_pages];
2941 		if (lvl) {
2942 			ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2943 							 pg_size);
2944 			if (ret)
2945 				ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2946 								 req->rq_pages,
2947 								 pg_size);
2948 		}
2949 
2950 		if (!ret) {
2951 			hmc_p->idx = palloc->level1.idx;
2952 			hmc_p = &qpmr->rq_pbl;
2953 			hmc_p->idx = palloc->level1.idx + req->sq_pages;
2954 		} else {
2955 			hmc_p->addr = arr[0];
2956 			hmc_p = &qpmr->rq_pbl;
2957 			hmc_p->addr = arr[req->sq_pages];
2958 		}
2959 		break;
2960 	case IRDMA_MEMREG_TYPE_SRQ:
2961 		hmc_p = &srqmr->srq_pbl;
2962 		srqmr->shadow = (dma_addr_t)arr[req->rq_pages];
2963 		if (lvl)
2964 			ret = irdma_check_mem_contiguous(arr, req->rq_pages,
2965 							 pg_size);
2966 
2967 		if (!ret)
2968 			hmc_p->idx = palloc->level1.idx;
2969 		else
2970 			hmc_p->addr = arr[0];
2971 		break;
2972 	case IRDMA_MEMREG_TYPE_CQ:
2973 		hmc_p = &cqmr->cq_pbl;
2974 
2975 		if (!cqmr->split)
2976 			cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2977 
2978 		if (lvl)
2979 			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2980 							 pg_size);
2981 
2982 		if (!ret)
2983 			hmc_p->idx = palloc->level1.idx;
2984 		else
2985 			hmc_p->addr = arr[0];
2986 		break;
2987 	default:
2988 		ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2989 		err = -EINVAL;
2990 	}
2991 
2992 	if (lvl && ret) {
2993 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2994 		iwpbl->pbl_allocated = false;
2995 	}
2996 
2997 	return err;
2998 }
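/*
 * Illustrative sketch: for IRDMA_MEMREG_TYPE_QP with sq_pages == 4 and
 * rq_pages == 2, the level-1 array maps out as
 *
 *	arr[0..3]	SQ pages	-> qpmr->sq_pbl
 *	arr[4..5]	RQ pages	-> qpmr->rq_pbl (qpmr->rq_pa = arr[4])
 *	arr[6]		shadow area	-> qpmr->shadow
 *
 * If both runs are physically contiguous, the pble backing is released
 * and the raw physical addresses are programmed instead.
 */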
2999 
3000 /**
3001  * irdma_hw_alloc_mw - create the hw memory window
3002  * @iwdev: irdma device
3003  * @iwmr: pointer to memory window info
3004  */
3005 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
3006 {
3007 	struct irdma_mw_alloc_info *info;
3008 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
3009 	struct irdma_cqp_request *cqp_request;
3010 	struct cqp_cmds_info *cqp_info;
3011 	int status;
3012 
3013 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3014 	if (!cqp_request)
3015 		return -ENOMEM;
3016 
3017 	cqp_info = &cqp_request->info;
3018 	info = &cqp_info->in.u.mw_alloc.info;
3019 	memset(info, 0, sizeof(*info));
3020 	if (iwmr->ibmw.type == IB_MW_TYPE_1)
3021 		info->mw_wide = true;
3022 
3023 	info->page_size = PAGE_SIZE;
3024 	info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
3025 	info->pd_id = iwpd->sc_pd.pd_id;
3026 	info->remote_access = true;
3027 	cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
3028 	cqp_info->post_sq = 1;
3029 	cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
3030 	cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
3031 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3032 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3033 
3034 	return status;
3035 }
3036 
3037 /**
3038  * irdma_alloc_mw - Allocate memory window
3039  * @ibmw: Memory Window
3040  * @udata: user data pointer
3041  */
3042 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
3043 {
3044 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
3045 	struct irdma_mr *iwmr = to_iwmw(ibmw);
3046 	int err_code;
3047 	u32 stag;
3048 
3049 	stag = irdma_create_stag(iwdev);
3050 	if (!stag)
3051 		return -ENOMEM;
3052 
3053 	iwmr->stag = stag;
3054 	ibmw->rkey = stag;
3055 
3056 	err_code = irdma_hw_alloc_mw(iwdev, iwmr);
3057 	if (err_code) {
3058 		irdma_free_stag(iwdev, stag);
3059 		return err_code;
3060 	}
3061 
3062 	return 0;
3063 }
3064 
3065 /**
3066  * irdma_dealloc_mw - Dealloc memory window
3067  * @ibmw: memory window structure.
3068  */
3069 static int irdma_dealloc_mw(struct ib_mw *ibmw)
3070 {
3071 	struct ib_pd *ibpd = ibmw->pd;
3072 	struct irdma_pd *iwpd = to_iwpd(ibpd);
3073 	struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
3074 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
3075 	struct irdma_cqp_request *cqp_request;
3076 	struct cqp_cmds_info *cqp_info;
3077 	struct irdma_dealloc_stag_info *info;
3078 
3079 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3080 	if (!cqp_request)
3081 		return -ENOMEM;
3082 
3083 	cqp_info = &cqp_request->info;
3084 	info = &cqp_info->in.u.dealloc_stag.info;
3085 	memset(info, 0, sizeof(*info));
3086 	info->pd_id = iwpd->sc_pd.pd_id;
3087 	info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3088 	info->mr = false;
3089 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3090 	cqp_info->post_sq = 1;
3091 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3092 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3093 	irdma_handle_cqp_op(iwdev->rf, cqp_request);
3094 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3095 	irdma_free_stag(iwdev, iwmr->stag);
3096 
3097 	return 0;
3098 }
3099 
3100 /**
3101  * irdma_hw_alloc_stag - cqp command to allocate stag
3102  * @iwdev: irdma device
3103  * @iwmr: irdma mr pointer
3104  */
3105 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
3106 			       struct irdma_mr *iwmr)
3107 {
3108 	struct irdma_allocate_stag_info *info;
3109 	struct ib_pd *pd = iwmr->ibmr.pd;
3110 	struct irdma_pd *iwpd = to_iwpd(pd);
3111 	int status;
3112 	struct irdma_cqp_request *cqp_request;
3113 	struct cqp_cmds_info *cqp_info;
3114 
3115 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3116 	if (!cqp_request)
3117 		return -ENOMEM;
3118 
3119 	cqp_info = &cqp_request->info;
3120 	info = &cqp_info->in.u.alloc_stag.info;
3121 	info->page_size = PAGE_SIZE;
3122 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
3123 	info->pd_id = iwpd->sc_pd.pd_id;
3124 	info->total_len = iwmr->len;
3125 	info->remote_access = true;
3126 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
3127 	cqp_info->post_sq = 1;
3128 	cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
3129 	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
3130 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3131 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3132 	if (status)
3133 		return status;
3134 
3135 	iwmr->is_hwreg = true;
3136 	return 0;
3137 }
3138 
3139 /**
3140  * irdma_alloc_mr - register stag for fast memory registration
3141  * @pd: ibpd pointer
3142  * @mr_type: memory type for stag registration
3143  * @max_num_sg: max number of pages
3144  */
3145 static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
3146 				    u32 max_num_sg)
3147 {
3148 	struct irdma_device *iwdev = to_iwdev(pd->device);
3149 	struct irdma_pble_alloc *palloc;
3150 	struct irdma_pbl *iwpbl;
3151 	struct irdma_mr *iwmr;
3152 	u32 stag;
3153 	int err_code;
3154 
3155 	iwmr = kzalloc_obj(*iwmr);
3156 	if (!iwmr)
3157 		return ERR_PTR(-ENOMEM);
3158 
3159 	stag = irdma_create_stag(iwdev);
3160 	if (!stag) {
3161 		err_code = -ENOMEM;
3162 		goto err;
3163 	}
3164 
3165 	iwmr->stag = stag;
3166 	iwmr->ibmr.rkey = stag;
3167 	iwmr->ibmr.lkey = stag;
3168 	iwmr->ibmr.pd = pd;
3169 	iwmr->ibmr.device = pd->device;
3170 	iwpbl = &iwmr->iwpbl;
3171 	iwpbl->iwmr = iwmr;
3172 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
3173 	palloc = &iwpbl->pble_alloc;
3174 	iwmr->page_cnt = max_num_sg;
3175 	/* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
3176 	iwmr->len = max_num_sg * PAGE_SIZE;
3177 	err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
3178 				  false);
3179 	if (err_code)
3180 		goto err_get_pble;
3181 
3182 	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
3183 	if (err_code)
3184 		goto err_alloc_stag;
3185 
3186 	iwpbl->pbl_allocated = true;
3187 
3188 	return &iwmr->ibmr;
3189 err_alloc_stag:
3190 	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
3191 err_get_pble:
3192 	irdma_free_stag(iwdev, stag);
3193 err:
3194 	kfree(iwmr);
3195 
3196 	return ERR_PTR(err_code);
3197 }
3198 
3199 /**
3200  * irdma_set_page - populate pbl list for fmr
3201  * @ibmr: ib mem to access iwarp mr pointer
3202  * @addr: page dma address for pbl list
3203  */
3204 static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
3205 {
3206 	struct irdma_mr *iwmr = to_iwmr(ibmr);
3207 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3208 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3209 	u64 *pbl;
3210 
3211 	if (unlikely(iwmr->npages == iwmr->page_cnt))
3212 		return -ENOMEM;
3213 
3214 	if (palloc->level == PBLE_LEVEL_2) {
3215 		struct irdma_pble_info *palloc_info =
3216 			palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
3217 
3218 		palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
3219 	} else {
3220 		pbl = palloc->level1.addr;
3221 		pbl[iwmr->npages] = addr;
3222 	}
3223 	iwmr->npages++;
3224 
3225 	return 0;
3226 }
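/*
 * Illustrative sketch: for a level-2 tree with 512 pble entries per leaf
 * (PBLE_PER_PAGE, matching PBLE_512_SHIFT == 9 as assumed here), page n
 * lands in leaf n >> 9 at slot n & 511; e.g. iwmr->npages == 1027 writes
 * leaf 2, slot 3. -ENOMEM means the MR's page budget is exhausted.
 */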
3227 
3228 /**
3229  * irdma_map_mr_sg - map sg list for fmr
3230  * @ibmr: ib mem to access iwarp mr pointer
3231  * @sg: scatter gather list
3232  * @sg_nents: number of sg entries
3233  * @sg_offset: offset into the first sg entry
3234  */
3235 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3236 			   int sg_nents, unsigned int *sg_offset)
3237 {
3238 	struct irdma_mr *iwmr = to_iwmr(ibmr);
3239 
3240 	iwmr->npages = 0;
3241 
3242 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
3243 }
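/*
 * Illustrative ULP usage sketch (not driver code): this hook is reached
 * through the core helper, which also sets the MR page size first:
 *
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *	if (n != sg_nents)
 *		...	// MR could not cover the whole list; n mapped
 *
 * ib_sg_to_pages() then invokes irdma_set_page() once per mapped block.
 */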
3244 
3245 /**
3246  * irdma_hwreg_mr - send cqp command for memory registration
3247  * @iwdev: irdma device
3248  * @iwmr: irdma mr pointer
3249  * @access: access for MR
3250  */
3251 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
3252 			  u16 access)
3253 {
3254 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3255 	struct irdma_reg_ns_stag_info *stag_info;
3256 	struct ib_pd *pd = iwmr->ibmr.pd;
3257 	struct irdma_pd *iwpd = to_iwpd(pd);
3258 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3259 	struct irdma_cqp_request *cqp_request;
3260 	struct cqp_cmds_info *cqp_info;
3261 	int ret;
3262 
3263 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3264 	if (!cqp_request)
3265 		return -ENOMEM;
3266 
3267 	cqp_info = &cqp_request->info;
3268 	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
3269 	stag_info->va = iwpbl->user_base;
3270 	stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
3271 	stag_info->stag_key = (u8)iwmr->stag;
3272 	stag_info->total_len = iwmr->len;
3273 	stag_info->access_rights = irdma_get_mr_access(access,
3274 						       iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
3275 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
3276 		stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3277 	stag_info->pd_id = iwpd->sc_pd.pd_id;
3278 	stag_info->all_memory = iwmr->dma_mr;
3279 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
3280 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
3281 	else
3282 		stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3283 	stag_info->page_size = iwmr->page_size;
3284 
3285 	if (iwpbl->pbl_allocated) {
3286 		if (palloc->level == PBLE_LEVEL_1) {
3287 			stag_info->first_pm_pbl_index = palloc->level1.idx;
3288 			stag_info->chunk_size = 1;
3289 		} else {
3290 			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
3291 			stag_info->chunk_size = 3;
3292 		}
3293 	} else {
3294 		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
3295 	}
3296 
3297 	cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
3298 	cqp_info->post_sq = 1;
3299 	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
3300 	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
3301 	ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3302 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3303 
3304 	if (!ret)
3305 		iwmr->is_hwreg = true;
3306 
3307 	return ret;
3308 }
3309 
3310 static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
3311 				      bool create_stag)
3312 {
3313 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3314 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3315 	u32 stag = 0;
3316 	u8 lvl;
3317 	int err;
3318 
3319 	lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
3320 
3321 	err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
3322 	if (err)
3323 		return err;
3324 
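	/* irdma_check_mr_contiguous() returns true when the region is
	 * physically contiguous; the level-1/2 pble backing is then
	 * unnecessary and is released, despite the "err" name.
	 */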
3325 	if (lvl) {
3326 		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
3327 						iwmr->page_size);
3328 		if (err) {
3329 			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
3330 			iwpbl->pbl_allocated = false;
3331 		}
3332 	}
3333 
3334 	if (create_stag) {
3335 		stag = irdma_create_stag(iwdev);
3336 		if (!stag) {
3337 			err = -ENOMEM;
3338 			goto free_pble;
3339 		}
3340 
3341 		iwmr->stag = stag;
3342 		iwmr->ibmr.rkey = stag;
3343 		iwmr->ibmr.lkey = stag;
3344 	}
3345 
3346 	err = irdma_hwreg_mr(iwdev, iwmr, access);
3347 	if (err)
3348 		goto err_hwreg;
3349 
3350 	return 0;
3351 
3352 err_hwreg:
3353 	if (stag)
3354 		irdma_free_stag(iwdev, stag);
3355 
3356 free_pble:
3357 	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
3358 		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
3359 
3360 	return err;
3361 }
3362 
3363 static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
3364 					 struct ib_pd *pd, u64 virt,
3365 					 enum irdma_memreg_type reg_type)
3366 {
3367 	struct irdma_device *iwdev = to_iwdev(pd->device);
3368 	struct irdma_pbl *iwpbl;
3369 	struct irdma_mr *iwmr;
3370 	unsigned long pgsz_bitmap;
3371 
3372 	iwmr = kzalloc_obj(*iwmr);
3373 	if (!iwmr)
3374 		return ERR_PTR(-ENOMEM);
3375 
3376 	iwpbl = &iwmr->iwpbl;
3377 	iwpbl->iwmr = iwmr;
3378 	iwmr->region = region;
3379 	iwmr->ibmr.pd = pd;
3380 	iwmr->ibmr.device = pd->device;
3381 	iwmr->ibmr.iova = virt;
3382 	iwmr->type = reg_type;
3383 
3384 	pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
3385 		iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
3386 
3387 	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
3388 	if (unlikely(!iwmr->page_size)) {
3389 		kfree(iwmr);
3390 		return ERR_PTR(-EOPNOTSUPP);
3391 	}
3392 
3393 	iwmr->len = region->length;
3394 	iwpbl->user_base = virt;
3395 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
3396 
3397 	return iwmr;
3398 }
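/*
 * Note: ib_umem_find_best_pgsz() picks the largest HW-supported page
 * size that tiles the region for the given iova; e.g. a 2M-aligned,
 * 2M-long region on hardware advertising SZ_4K | SZ_2M in page_size_cap
 * maps as one 2M block, so page_cnt == 1 and the MEM registration path
 * can skip pble allocation entirely (PBLE_LEVEL_0).
 */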
3399 
3400 static void irdma_free_iwmr(struct irdma_mr *iwmr)
3401 {
3402 	kfree(iwmr);
3403 }
3404 
3405 static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
3406 				     struct ib_udata *udata,
3407 				     struct irdma_mr *iwmr)
3408 {
3409 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3410 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3411 	struct irdma_ucontext *ucontext = NULL;
3412 	unsigned long flags;
3413 	u32 total;
3414 	int err;
3415 	u8 lvl;
3416 
3417 	/* iWarp: Catch page not starting on OS page boundary */
3418 	if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
3419 	    ib_umem_offset(iwmr->region))
3420 		return -EINVAL;
3421 
3422 	total = req.sq_pages + req.rq_pages + 1;
3423 	if (total > iwmr->page_cnt)
3424 		return -EINVAL;
3425 
3426 	total = req.sq_pages + req.rq_pages;
3427 	lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
3428 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
3429 	if (err)
3430 		return err;
3431 
3432 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
3433 					     ibucontext);
3434 	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
3435 	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
3436 	iwpbl->on_list = true;
3437 	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
3438 
3439 	return 0;
3440 }
3441 
3442 static int irdma_reg_user_mr_type_srq(struct irdma_mem_reg_req req,
3443 				      struct ib_udata *udata,
3444 				      struct irdma_mr *iwmr)
3445 {
3446 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3447 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3448 	struct irdma_ucontext *ucontext;
3449 	unsigned long flags;
3450 	u32 total;
3451 	int err;
3452 	u8 lvl;
3453 
3454 	total = req.rq_pages + IRDMA_SHADOW_PGCNT;
3455 	if (total > iwmr->page_cnt)
3456 		return -EINVAL;
3457 
3458 	lvl = req.rq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
3459 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
3460 	if (err)
3461 		return err;
3462 
3463 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
3464 					     ibucontext);
3465 	spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
3466 	list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list);
3467 	iwpbl->on_list = true;
3468 	spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
3469 
3470 	return 0;
3471 }
3472 
3473 static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
3474 				     struct ib_udata *udata,
3475 				     struct irdma_mr *iwmr)
3476 {
3477 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3478 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3479 	struct irdma_ucontext *ucontext = NULL;
3480 	u8 shadow_pgcnt = 1;
3481 	unsigned long flags;
3482 	u32 total;
3483 	int err;
3484 	u8 lvl;
3485 
3486 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
3487 		shadow_pgcnt = 0;
3488 	total = req.cq_pages + shadow_pgcnt;
3489 	if (total > iwmr->page_cnt)
3490 		return -EINVAL;
3491 
3492 	lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
3493 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
3494 	if (err)
3495 		return err;
3496 
3497 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
3498 					     ibucontext);
3499 	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3500 	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
3501 	iwpbl->on_list = true;
3502 	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3503 
3504 	return 0;
3505 }
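
/*
 * Each queue registration above reserves room for a trailing shadow area
 * (one page for QPs and CQs, IRDMA_SHADOW_PGCNT for SRQs), which the
 * device appears to use for queue bookkeeping; hardware with the
 * CQ_RESIZE feature keeps the CQ shadow elsewhere, so no extra umem page
 * is demanded of userspace in that case.
 */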
3506 
3507 /**
3508  * irdma_reg_user_mr - Register a user memory region
3509  * @pd: ptr of pd
3510  * @start: virtual start address
3511  * @len: length of mr
3512  * @virt: virtual address
3513  * @access: access of mr
3514  * @dmah: dma handle
3515  * @udata: user data
3516  */
3517 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
3518 				       u64 virt, int access,
3519 				       struct ib_dmah *dmah,
3520 				       struct ib_udata *udata)
3521 {
3522 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
3523 	struct irdma_device *iwdev = to_iwdev(pd->device);
3524 	struct irdma_mem_reg_req req = {};
3525 	struct ib_umem *region = NULL;
3526 	struct irdma_mr *iwmr = NULL;
3527 	int err;
3528 
3529 	if (dmah)
3530 		return ERR_PTR(-EOPNOTSUPP);
3531 
3532 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3533 		return ERR_PTR(-EINVAL);
3534 
3535 	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
3536 		return ERR_PTR(-EINVAL);
3537 
3538 	region = ib_umem_get(pd->device, start, len, access);
3540 	if (IS_ERR(region)) {
3541 		ibdev_dbg(&iwdev->ibdev,
3542 			  "VERBS: Failed to create ib_umem region\n");
3543 		return ERR_CAST(region);
3544 	}
3545 
3546 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
3547 		ib_umem_release(region);
3548 		return ERR_PTR(-EFAULT);
3549 	}
3550 
3551 	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
3552 	if (IS_ERR(iwmr)) {
3553 		ib_umem_release(region);
3554 		return ERR_CAST(iwmr);
3555 	}
3556 
3557 	switch (req.reg_type) {
3558 	case IRDMA_MEMREG_TYPE_QP:
3559 		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
3560 		if (err)
3561 			goto error;
3562 
3563 		break;
3564 	case IRDMA_MEMREG_TYPE_SRQ:
3565 		err = irdma_reg_user_mr_type_srq(req, udata, iwmr);
3566 		if (err)
3567 			goto error;
3568 
3569 		break;
3570 	case IRDMA_MEMREG_TYPE_CQ:
3571 		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
3572 		if (err)
3573 			goto error;
3574 		break;
3575 	case IRDMA_MEMREG_TYPE_MEM:
3576 		err = irdma_reg_user_mr_type_mem(iwmr, access, true);
3577 		if (err)
3578 			goto error;
3579 
3580 		break;
3581 	default:
3582 		err = -EINVAL;
3583 		goto error;
3584 	}
3585 
3586 	return &iwmr->ibmr;
3587 error:
3588 	ib_umem_release(region);
3589 	irdma_free_iwmr(iwmr);
3590 
3591 	return ERR_PTR(err);
3592 }
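
/*
 * The reg_type dispatch above is driven by the irdma provider library:
 * plain ibv_reg_mr() registrations arrive as IRDMA_MEMREG_TYPE_MEM,
 * while QP/CQ/SRQ ring memory is registered with the matching type so
 * the umem can be wired to queue PBLEs. A hedged sketch of the request
 * this function consumes, using the struct irdma_mem_reg_req fields
 * referenced above:
 *
 *	struct irdma_mem_reg_req req = {
 *		.reg_type = IRDMA_MEMREG_TYPE_QP,
 *		.sq_pages = 2,
 *		.rq_pages = 1,
 *	};
 */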
3593 
3594 static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
3595 					      u64 len, u64 virt,
3596 					      int fd, int access,
3597 					      struct ib_dmah *dmah,
3598 					      struct uverbs_attr_bundle *attrs)
3599 {
3600 	struct irdma_device *iwdev = to_iwdev(pd->device);
3601 	struct ib_umem_dmabuf *umem_dmabuf;
3602 	struct irdma_mr *iwmr;
3603 	int err;
3604 
3605 	if (dmah)
3606 		return ERR_PTR(-EOPNOTSUPP);
3607 
3608 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3609 		return ERR_PTR(-EINVAL);
3610 
3611 	umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
3612 	if (IS_ERR(umem_dmabuf)) {
3613 		ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%pe]\n",
3614 			  umem_dmabuf);
3615 		return ERR_CAST(umem_dmabuf);
3616 	}
3617 
3618 	iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
3619 	if (IS_ERR(iwmr)) {
3620 		err = PTR_ERR(iwmr);
3621 		goto err_release;
3622 	}
3623 
3624 	err = irdma_reg_user_mr_type_mem(iwmr, access, true);
3625 	if (err)
3626 		goto err_iwmr;
3627 
3628 	return &iwmr->ibmr;
3629 
3630 err_iwmr:
3631 	irdma_free_iwmr(iwmr);
3632 
3633 err_release:
3634 	ib_umem_release(&umem_dmabuf->umem);
3635 
3636 	return ERR_PTR(err);
3637 }
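
/*
 * Only the pinned dma-buf flavor is used above, presumably because this
 * hardware cannot fence in-flight DMA for a move_notify()-driven remap;
 * pinning keeps the attachment mapped for the life of the MR, and the
 * plain ib_umem_release() in the error path is the matching teardown.
 */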
3638 
3639 static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
3640 {
3641 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3642 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
3643 	struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
3644 	struct irdma_dealloc_stag_info *info;
3645 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3646 	struct irdma_cqp_request *cqp_request;
3647 	struct cqp_cmds_info *cqp_info;
3648 	int status;
3649 
3650 	/* Skip HW MR de-register when it is already de-registered
3651 	 * during an MR re-register and the re-registration fails
3652 	 */
3653 	if (!iwmr->is_hwreg)
3654 		return 0;
3655 
3656 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3657 	if (!cqp_request)
3658 		return -ENOMEM;
3659 
3660 	cqp_info = &cqp_request->info;
3661 	info = &cqp_info->in.u.dealloc_stag.info;
3662 	info->pd_id = iwpd->sc_pd.pd_id;
3663 	info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3664 	info->mr = true;
3665 	if (iwpbl->pbl_allocated)
3666 		info->dealloc_pbl = true;
3667 
3668 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3669 	cqp_info->post_sq = 1;
3670 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3671 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3672 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3673 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3674 	if (status)
3675 		return status;
3676 
3677 	iwmr->is_hwreg = false;
3678 	return 0;
3679 }
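
/*
 * The CQP sequence above is the pattern used throughout this file:
 * allocate a tracked request, fill in cqp_info, post the op, then drop
 * the request reference. A minimal sketch of that skeleton (error
 * handling trimmed; op-specific fields omitted):
 *
 *	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 *	if (!cqp_request)
 *		return -ENOMEM;
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;	// op-specific
 *	cqp_info->post_sq = 1;
 *	status = irdma_handle_cqp_op(rf, cqp_request);	// post and wait
 *	irdma_put_cqp_request(&rf->cqp, cqp_request);	// drop reference
 */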
3680 
3681 /**
3682  * irdma_rereg_mr_trans - Re-register a user MR for a translation change.
3683  * @iwmr: ptr of iwmr
3684  * @start: virtual start address
3685  * @len: length of mr
3686  * @virt: virtual address
3687  *
3688  * Re-register a user memory region when a translation change is requested,
3689  * reusing the STag from the original registration for the new region.
3690  */
3691 static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
3692 				u64 virt)
3693 {
3694 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3695 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3696 	struct ib_pd *pd = iwmr->ibmr.pd;
3697 	struct ib_umem *region;
3698 	int err;
3699 
3700 	region = ib_umem_get(pd->device, start, len, iwmr->access);
3701 	if (IS_ERR(region))
3702 		return PTR_ERR(region);
3703 
3704 	iwmr->region = region;
3705 	iwmr->ibmr.iova = virt;
3706 	iwmr->ibmr.pd = pd;
3707 	iwmr->page_size = ib_umem_find_best_pgsz(region,
3708 				iwdev->rf->sc_dev.hw_attrs.page_size_cap,
3709 				virt);
3710 	if (unlikely(!iwmr->page_size)) {
3711 		err = -EOPNOTSUPP;
3712 		goto err;
3713 	}
3714 
3715 	iwmr->len = region->length;
3716 	iwpbl->user_base = virt;
3717 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
3718 
3719 	err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false);
3720 	if (err)
3721 		goto err;
3722 
3723 	return 0;
3724 
3725 err:
3726 	ib_umem_release(region);
3727 	iwmr->region = NULL;
3728 	return err;
3729 }
3730 
3731 /**
3732  * irdma_rereg_user_mr - Re-register a user memory region (MR)
3733  * @ib_mr: ib_mr pointer of the MR being re-registered
3734  * @flags: bit mask indicating which MR attributes are being modified
3735  * @start: virtual start address
3736  * @len: length of mr
3737  * @virt: virtual address
3738  * @new_access: bit mask of new access flags
3739  * @new_pd: ptr of new pd
3740  * @udata: user data
3741  *
3742  * Return:
3743  * NULL - success, existing MR updated
3744  * ERR_PTR - error occurred
3745  */
3746 static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags,
3747 					 u64 start, u64 len, u64 virt,
3748 					 int new_access, struct ib_pd *new_pd,
3749 					 struct ib_udata *udata)
3750 {
3751 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3752 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
3753 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3754 	int ret;
3755 
3756 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3757 		return ERR_PTR(-EINVAL);
3758 
3759 	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
3760 		return ERR_PTR(-EOPNOTSUPP);
3761 
3762 	ret = irdma_hwdereg_mr(ib_mr);
3763 	if (ret)
3764 		return ERR_PTR(ret);
3765 
3766 	if (flags & IB_MR_REREG_ACCESS)
3767 		iwmr->access = new_access;
3768 
3769 	if (flags & IB_MR_REREG_PD) {
3770 		iwmr->ibmr.pd = new_pd;
3771 		iwmr->ibmr.device = new_pd->device;
3772 	}
3773 
3774 	if (flags & IB_MR_REREG_TRANS) {
3775 		if (iwpbl->pbl_allocated) {
3776 			irdma_free_pble(iwdev->rf->pble_rsrc,
3777 					&iwpbl->pble_alloc);
3778 			iwpbl->pbl_allocated = false;
3779 		}
3780 		if (iwmr->region) {
3781 			ib_umem_release(iwmr->region);
3782 			iwmr->region = NULL;
3783 		}
3784 
3785 		ret = irdma_rereg_mr_trans(iwmr, start, len, virt);
3786 	} else
3787 		ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
3788 	if (ret)
3789 		return ERR_PTR(ret);
3790 
3791 	return NULL;
3792 }
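
/*
 * Note the return convention above: NULL tells the verbs core that the
 * existing MR was updated in place, while ERR_PTR() reports failure. A
 * hedged caller-side sketch of how the two are distinguished:
 *
 *	new_mr = dev->ops.rereg_user_mr(mr, flags, start, len, virt,
 *					access, pd, udata);
 *	if (IS_ERR(new_mr))
 *		return PTR_ERR(new_mr);
 *	// NULL: 'mr' itself remains the valid handle
 */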
3793 
3794 /**
3795  * irdma_reg_phys_mr - register kernel physical memory
3796  * @pd: ibpd pointer
3797  * @addr: physical address of memory to register
3798  * @size: size of memory to register
3799  * @access: Access rights
3800  * @iova_start: start of virtual address for physical buffers
3801  * @dma_mr: Flag indicating whether this region is a PD DMA MR
3802  */
3803 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
3804 				u64 *iova_start, bool dma_mr)
3805 {
3806 	struct irdma_device *iwdev = to_iwdev(pd->device);
3807 	struct irdma_pbl *iwpbl;
3808 	struct irdma_mr *iwmr;
3809 	u32 stag;
3810 	int ret;
3811 
3812 	iwmr = kzalloc_obj(*iwmr);
3813 	if (!iwmr)
3814 		return ERR_PTR(-ENOMEM);
3815 
3816 	iwmr->ibmr.pd = pd;
3817 	iwmr->ibmr.device = pd->device;
3818 	iwpbl = &iwmr->iwpbl;
3819 	iwpbl->iwmr = iwmr;
3820 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
3821 	iwmr->dma_mr = dma_mr;
3822 	iwpbl->user_base = *iova_start;
3823 	stag = irdma_create_stag(iwdev);
3824 	if (!stag) {
3825 		ret = -ENOMEM;
3826 		goto err;
3827 	}
3828 
3829 	iwmr->stag = stag;
3830 	iwmr->ibmr.iova = *iova_start;
3831 	iwmr->ibmr.rkey = stag;
3832 	iwmr->ibmr.lkey = stag;
3833 	iwmr->page_cnt = 1;
3834 	iwmr->pgaddrmem[0] = addr;
3835 	iwmr->len = size;
3836 	iwmr->page_size = SZ_4K;
3837 	ret = irdma_hwreg_mr(iwdev, iwmr, access);
3838 	if (ret) {
3839 		irdma_free_stag(iwdev, stag);
3840 		goto err;
3841 	}
3842 
3843 	return &iwmr->ibmr;
3844 
3845 err:
3846 	kfree(iwmr);
3847 
3848 	return ERR_PTR(ret);
3849 }
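
/*
 * A minimal in-kernel usage sketch for the helper above (illustrative
 * only; 'pd' and 'pa' are assumed to exist, and the region is a single
 * 4K page, matching the fixed page_cnt/page_size set here):
 *
 *	u64 iova = 0;
 *	struct ib_mr *mr;
 *
 *	mr = irdma_reg_phys_mr(pd, pa, SZ_4K, IB_ACCESS_LOCAL_WRITE,
 *			       &iova, false);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */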
3850 
3851 /**
3852  * irdma_get_dma_mr - register physical mem
3853  * @pd: ptr of pd
3854  * @acc: access for memory
3855  */
3856 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
3857 {
3858 	u64 kva = 0;
3859 
3860 	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva, true);
3861 }
3862 
3863 /**
3864  * irdma_del_memlist - Delete PBL list entries for CQ/QP/SRQ
3865  * @iwmr: iwmr for IB's user page addresses
3866  * @ucontext: ptr to user context
3867  */
3868 static void irdma_del_memlist(struct irdma_mr *iwmr,
3869 			      struct irdma_ucontext *ucontext)
3870 {
3871 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3872 	unsigned long flags;
3873 
3874 	switch (iwmr->type) {
3875 	case IRDMA_MEMREG_TYPE_CQ:
3876 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3877 		if (iwpbl->on_list) {
3878 			iwpbl->on_list = false;
3879 			list_del(&iwpbl->list);
3880 		}
3881 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3882 		break;
3883 	case IRDMA_MEMREG_TYPE_QP:
3884 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
3885 		if (iwpbl->on_list) {
3886 			iwpbl->on_list = false;
3887 			list_del(&iwpbl->list);
3888 		}
3889 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
3890 		break;
3891 	case IRDMA_MEMREG_TYPE_SRQ:
3892 		spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
3893 		if (iwpbl->on_list) {
3894 			iwpbl->on_list = false;
3895 			list_del(&iwpbl->list);
3896 		}
3897 		spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
3898 		break;
3899 	default:
3900 		break;
3901 	}
3902 }
3903 
3904 /**
3905  * irdma_dereg_mr - deregister mr
3906  * @ib_mr: mr ptr for dereg
3907  * @udata: user data
3908  */
3909 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3910 {
3911 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
3912 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3913 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3914 	int ret;
3915 
3916 	if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3917 		if (iwmr->region) {
3918 			struct irdma_ucontext *ucontext;
3919 
3920 			ucontext = rdma_udata_to_drv_context(udata,
3921 						struct irdma_ucontext,
3922 						ibucontext);
3923 			irdma_del_memlist(iwmr, ucontext);
3924 		}
3925 		goto done;
3926 	}
3927 
3928 	ret = irdma_hwdereg_mr(ib_mr);
3929 	if (ret)
3930 		return ret;
3931 
3932 	irdma_free_stag(iwdev, iwmr->stag);
3933 done:
3934 	if (iwpbl->pbl_allocated)
3935 		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
3936 
3937 	if (iwmr->region)
3938 		ib_umem_release(iwmr->region);
3939 
3940 	kfree(iwmr);
3941 
3942 	return 0;
3943 }
3944 
3945 /**
3946  * irdma_post_send - post send work requests for a kernel application
3947  * @ibqp: qp ptr for the work requests
3948  * @ib_wr: work request ptr
3949  * @bad_wr: returns the first failed wr on error
3950  */
3951 static int irdma_post_send(struct ib_qp *ibqp,
3952 			   const struct ib_send_wr *ib_wr,
3953 			   const struct ib_send_wr **bad_wr)
3954 {
3955 	struct irdma_qp *iwqp;
3956 	struct irdma_qp_uk *ukqp;
3957 	struct irdma_sc_dev *dev;
3958 	struct irdma_post_sq_info info;
3959 	int err = 0;
3960 	unsigned long flags;
3961 	bool inv_stag;
3962 	struct irdma_ah *ah;
3963 
3964 	iwqp = to_iwqp(ibqp);
3965 	ukqp = &iwqp->sc_qp.qp_uk;
3966 	dev = &iwqp->iwdev->rf->sc_dev;
3967 
3968 	spin_lock_irqsave(&iwqp->lock, flags);
3969 	while (ib_wr) {
3970 		memset(&info, 0, sizeof(info));
3971 		inv_stag = false;
3972 		info.wr_id = (ib_wr->wr_id);
3973 		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3974 			info.signaled = true;
3975 		if (ib_wr->send_flags & IB_SEND_FENCE)
3976 			info.read_fence = true;
3977 		switch (ib_wr->opcode) {
3978 		case IB_WR_ATOMIC_CMP_AND_SWP:
3979 			if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
3980 				       IRDMA_FEATURE_ATOMIC_OPS))) {
3981 				err = -EINVAL;
3982 				break;
3983 			}
3984 			info.op_type = IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP;
3985 			info.op.atomic_compare_swap.tagged_offset = ib_wr->sg_list[0].addr;
3986 			info.op.atomic_compare_swap.remote_tagged_offset =
3987 				atomic_wr(ib_wr)->remote_addr;
3988 			info.op.atomic_compare_swap.swap_data_bytes = atomic_wr(ib_wr)->swap;
3989 			info.op.atomic_compare_swap.compare_data_bytes =
3990 				atomic_wr(ib_wr)->compare_add;
3991 			info.op.atomic_compare_swap.stag = ib_wr->sg_list[0].lkey;
3992 			info.op.atomic_compare_swap.remote_stag = atomic_wr(ib_wr)->rkey;
3993 			err = irdma_uk_atomic_compare_swap(ukqp, &info, false);
3994 			break;
3995 		case IB_WR_ATOMIC_FETCH_AND_ADD:
3996 			if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
3997 				       IRDMA_FEATURE_ATOMIC_OPS))) {
3998 				err = -EINVAL;
3999 				break;
4000 			}
4001 			info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
4002 			info.op.atomic_fetch_add.tagged_offset = ib_wr->sg_list[0].addr;
4003 			info.op.atomic_fetch_add.remote_tagged_offset =
4004 				atomic_wr(ib_wr)->remote_addr;
4005 			info.op.atomic_fetch_add.fetch_add_data_bytes =
4006 				atomic_wr(ib_wr)->compare_add;
4007 			info.op.atomic_fetch_add.stag = ib_wr->sg_list[0].lkey;
4008 			info.op.atomic_fetch_add.remote_stag =
4009 				atomic_wr(ib_wr)->rkey;
4010 			err = irdma_uk_atomic_fetch_add(ukqp, &info, false);
4011 			break;
4012 		case IB_WR_SEND_WITH_IMM:
4013 			if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
4014 				info.imm_data_valid = true;
4015 				info.imm_data = ntohl(ib_wr->ex.imm_data);
4016 			} else {
4017 				err = -EINVAL;
4018 				break;
4019 			}
4020 			fallthrough;
4021 		case IB_WR_SEND:
4022 		case IB_WR_SEND_WITH_INV:
4023 			if (ib_wr->opcode == IB_WR_SEND ||
4024 			    ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
4025 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
4026 					info.op_type = IRDMA_OP_TYPE_SEND_SOL;
4027 				else
4028 					info.op_type = IRDMA_OP_TYPE_SEND;
4029 			} else {
4030 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
4031 					info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
4032 				else
4033 					info.op_type = IRDMA_OP_TYPE_SEND_INV;
4034 				info.stag_to_inv = ib_wr->ex.invalidate_rkey;
4035 			}
4036 
4037 			info.op.send.num_sges = ib_wr->num_sge;
4038 			info.op.send.sg_list = ib_wr->sg_list;
4039 			if (iwqp->ibqp.qp_type == IB_QPT_UD ||
4040 			    iwqp->ibqp.qp_type == IB_QPT_GSI) {
4041 				ah = to_iwah(ud_wr(ib_wr)->ah);
4042 				info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
4043 				info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
4044 				info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
4045 			}
4046 
4047 			if (ib_wr->send_flags & IB_SEND_INLINE)
4048 				err = irdma_uk_inline_send(ukqp, &info, false);
4049 			else
4050 				err = irdma_uk_send(ukqp, &info, false);
4051 			break;
4052 		case IB_WR_RDMA_WRITE_WITH_IMM:
4053 			if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
4054 				info.imm_data_valid = true;
4055 				info.imm_data = ntohl(ib_wr->ex.imm_data);
4056 			} else {
4057 				err = -EINVAL;
4058 				break;
4059 			}
4060 			fallthrough;
4061 		case IB_WR_RDMA_WRITE:
4062 			if (ib_wr->send_flags & IB_SEND_SOLICITED)
4063 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
4064 			else
4065 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
4066 
4067 			info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
4068 			info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
4069 			info.op.rdma_write.rem_addr.addr =
4070 				rdma_wr(ib_wr)->remote_addr;
4071 			info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
4072 			if (ib_wr->send_flags & IB_SEND_INLINE)
4073 				err = irdma_uk_inline_rdma_write(ukqp, &info, false);
4074 			else
4075 				err = irdma_uk_rdma_write(ukqp, &info, false);
4076 			break;
4077 		case IB_WR_RDMA_READ_WITH_INV:
4078 			inv_stag = true;
4079 			fallthrough;
4080 		case IB_WR_RDMA_READ:
4081 			if (ib_wr->num_sge >
4082 			    dev->hw_attrs.uk_attrs.max_hw_read_sges) {
4083 				err = -EINVAL;
4084 				break;
4085 			}
4086 			info.op_type = IRDMA_OP_TYPE_RDMA_READ;
4087 			info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
4088 			info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
4089 			info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
4090 			info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
4091 			err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
4092 			break;
4093 		case IB_WR_LOCAL_INV:
4094 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
4095 			info.local_fence = true;
4096 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
4097 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
4098 			break;
4099 		case IB_WR_REG_MR: {
4100 			struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
4101 			struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
4102 			struct irdma_fast_reg_stag_info stag_info = {};
4103 
4104 			stag_info.signaled = info.signaled;
4105 			stag_info.read_fence = info.read_fence;
4106 			stag_info.access_rights =
4107 				irdma_get_mr_access(reg_wr(ib_wr)->access,
4108 						    dev->hw_attrs.uk_attrs.hw_rev);
4109 			stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
4110 			stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
4111 			stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
4112 			stag_info.wr_id = ib_wr->wr_id;
4113 			stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
4114 			stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
4115 			stag_info.total_len = iwmr->ibmr.length;
4116 			stag_info.reg_addr_pa = *palloc->level1.addr;
4117 			stag_info.first_pm_pbl_index = palloc->level1.idx;
4118 			stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
4119 			if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
4120 				stag_info.chunk_size = 1;
4121 			err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
4122 							true);
4123 			break;
4124 		}
4125 		default:
4126 			err = -EINVAL;
4127 			ibdev_dbg(&iwqp->iwdev->ibdev,
4128 				  "VERBS: post_send bad opcode = 0x%x\n",
4129 				  ib_wr->opcode);
4130 			break;
4131 		}
4132 
4133 		if (err)
4134 			break;
4135 		ib_wr = ib_wr->next;
4136 	}
4137 
4138 	if (!iwqp->flush_issued) {
4139 		if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
4140 			irdma_uk_qp_post_wr(ukqp);
4141 		spin_unlock_irqrestore(&iwqp->lock, flags);
4142 	} else {
4143 		spin_unlock_irqrestore(&iwqp->lock, flags);
4144 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
4145 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
4146 	}
4147 
4148 	if (err)
4149 		*bad_wr = ib_wr;
4150 
4151 	return err;
4152 }
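
/*
 * A hedged sketch of how a kernel ULP reaches the handler above through
 * the verbs API ('qp', 'sge', 'raddr' and 'rkey' are assumptions made
 * for illustration):
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.opcode = IB_WR_RDMA_WRITE,
 *			.send_flags = IB_SEND_SIGNALED,
 *			.sg_list = &sge,
 *			.num_sge = 1,
 *		},
 *		.remote_addr = raddr,
 *		.rkey = rkey,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr.wr, &bad_wr);
 */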
4153 
4154 /**
4155  * irdma_post_srq_recv - post receive wr for kernel application
4156  * @ibsrq: ib srq pointer
4157  * @ib_wr: work request for receive
4158  * @bad_wr: returns the first failed wr on error
4159  */
4160 static int irdma_post_srq_recv(struct ib_srq *ibsrq,
4161 			       const struct ib_recv_wr *ib_wr,
4162 			       const struct ib_recv_wr **bad_wr)
4163 {
4164 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
4165 	struct irdma_srq_uk *uksrq = &iwsrq->sc_srq.srq_uk;
4166 	struct irdma_post_rq_info post_recv = {};
4167 	unsigned long flags;
4168 	int err = 0;
4169 
4170 	spin_lock_irqsave(&iwsrq->lock, flags);
4171 	while (ib_wr) {
4172 		if (ib_wr->num_sge > uksrq->max_srq_frag_cnt) {
4173 			err = -EINVAL;
4174 			goto out;
4175 		}
4176 		post_recv.num_sges = ib_wr->num_sge;
4177 		post_recv.wr_id = ib_wr->wr_id;
4178 		post_recv.sg_list = ib_wr->sg_list;
4179 		err = irdma_uk_srq_post_receive(uksrq, &post_recv);
4180 		if (err)
4181 			goto out;
4182 
4183 		ib_wr = ib_wr->next;
4184 	}
4185 
4186 out:
4187 	spin_unlock_irqrestore(&iwsrq->lock, flags);
4188 
4189 	if (err)
4190 		*bad_wr = ib_wr;
4191 
4192 	return err;
4193 }
4194 
4195 /**
4196  * irdma_post_recv - post receive wr for kernel application
4197  * @ibqp: ib qp pointer
4198  * @ib_wr: work request for receive
4199  * @bad_wr: returns the first failed wr on error
4200  */
4201 static int irdma_post_recv(struct ib_qp *ibqp,
4202 			   const struct ib_recv_wr *ib_wr,
4203 			   const struct ib_recv_wr **bad_wr)
4204 {
4205 	struct irdma_qp *iwqp;
4206 	struct irdma_qp_uk *ukqp;
4207 	struct irdma_post_rq_info post_recv = {};
4208 	unsigned long flags;
4209 	int err = 0;
4210 
4211 	iwqp = to_iwqp(ibqp);
4212 	ukqp = &iwqp->sc_qp.qp_uk;
4213 
4214 	if (ukqp->srq_uk) {
4215 		*bad_wr = ib_wr;
4216 		return -EINVAL;
4217 	}
4218 
4219 	spin_lock_irqsave(&iwqp->lock, flags);
4220 	while (ib_wr) {
4221 		post_recv.num_sges = ib_wr->num_sge;
4222 		post_recv.wr_id = ib_wr->wr_id;
4223 		post_recv.sg_list = ib_wr->sg_list;
4224 		err = irdma_uk_post_receive(ukqp, &post_recv);
4225 		if (err) {
4226 			ibdev_dbg(&iwqp->iwdev->ibdev,
4227 				  "VERBS: post_recv err %d\n", err);
4228 			goto out;
4229 		}
4230 
4231 		ib_wr = ib_wr->next;
4232 	}
4233 
4234 out:
4235 	spin_unlock_irqrestore(&iwqp->lock, flags);
4236 	if (iwqp->flush_issued)
4237 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
4238 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
4239 
4240 	if (err)
4241 		*bad_wr = ib_wr;
4242 
4243 	return err;
4244 }
4245 
4246 /**
4247  * irdma_flush_err_to_ib_wc_status - convert a flush error code to an IB WC status
4248  * @opcode: iwarp flush code
4249  */
4250 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
4251 {
4252 	switch (opcode) {
4253 	case FLUSH_PROT_ERR:
4254 		return IB_WC_LOC_PROT_ERR;
4255 	case FLUSH_REM_ACCESS_ERR:
4256 		return IB_WC_REM_ACCESS_ERR;
4257 	case FLUSH_LOC_QP_OP_ERR:
4258 		return IB_WC_LOC_QP_OP_ERR;
4259 	case FLUSH_REM_OP_ERR:
4260 		return IB_WC_REM_OP_ERR;
4261 	case FLUSH_LOC_LEN_ERR:
4262 		return IB_WC_LOC_LEN_ERR;
4263 	case FLUSH_GENERAL_ERR:
4264 		return IB_WC_WR_FLUSH_ERR;
4265 	case FLUSH_RETRY_EXC_ERR:
4266 		return IB_WC_RETRY_EXC_ERR;
4267 	case FLUSH_MW_BIND_ERR:
4268 		return IB_WC_MW_BIND_ERR;
4269 	case FLUSH_REM_INV_REQ_ERR:
4270 		return IB_WC_REM_INV_REQ_ERR;
4271 	case FLUSH_RNR_RETRY_EXC_ERR:
4272 		return IB_WC_RNR_RETRY_EXC_ERR;
4273 	case FLUSH_FATAL_ERR:
4274 	default:
4275 		return IB_WC_FATAL_ERR;
4276 	}
4277 }
4278 
4279 /**
4280  * irdma_process_cqe - process cqe info
4281  * @entry: ib_wc entry to fill in
4282  * @cq_poll_info: polled CQE info to convert
4283  */
4284 static void irdma_process_cqe(struct ib_wc *entry,
4285 			      struct irdma_cq_poll_info *cq_poll_info)
4286 {
4287 	struct irdma_sc_qp *qp;
4288 
4289 	entry->wc_flags = 0;
4290 	entry->pkey_index = 0;
4291 	entry->wr_id = cq_poll_info->wr_id;
4292 
4293 	qp = cq_poll_info->qp_handle;
4294 	entry->qp = qp->qp_uk.back_qp;
4295 
4296 	if (cq_poll_info->error) {
4297 		entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
4298 				irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
4299 
4300 		entry->vendor_err = cq_poll_info->major_err << 16 |
4301 				    cq_poll_info->minor_err;
4302 	} else {
4303 		entry->status = IB_WC_SUCCESS;
4304 		if (cq_poll_info->imm_valid) {
4305 			entry->ex.imm_data = htonl(cq_poll_info->imm_data);
4306 			entry->wc_flags |= IB_WC_WITH_IMM;
4307 		}
4308 		if (cq_poll_info->ud_smac_valid) {
4309 			ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
4310 			entry->wc_flags |= IB_WC_WITH_SMAC;
4311 		}
4312 
4313 		if (cq_poll_info->ud_vlan_valid) {
4314 			u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
4315 
4316 			entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
4317 			if (vlan) {
4318 				entry->vlan_id = vlan;
4319 				entry->wc_flags |= IB_WC_WITH_VLAN;
4320 			}
4321 		} else {
4322 			entry->sl = 0;
4323 		}
4324 	}
4325 
4326 	if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
4327 		set_ib_wc_op_sq(cq_poll_info, entry);
4328 	} else {
4329 		if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
4330 			set_ib_wc_op_rq(cq_poll_info, entry,
4331 					!!(qp->qp_uk.qp_caps &
4332 					   IRDMA_SEND_WITH_IMM));
4333 		else
4334 			set_ib_wc_op_rq_gen_3(cq_poll_info, entry);
4335 		if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
4336 		    cq_poll_info->stag_invalid_set) {
4337 			entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
4338 			entry->wc_flags |= IB_WC_WITH_INVALIDATE;
4339 		}
4340 	}
4341 
4342 	if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
4343 		entry->src_qp = cq_poll_info->ud_src_qpn;
4344 		entry->slid = 0;
4345 		entry->wc_flags |=
4346 			(IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
4347 		entry->network_hdr_type = cq_poll_info->ipv4 ?
4348 						  RDMA_NETWORK_IPV4 :
4349 						  RDMA_NETWORK_IPV6;
4350 	} else {
4351 		entry->src_qp = cq_poll_info->qp_id;
4352 	}
4353 
4354 	entry->byte_len = cq_poll_info->bytes_xfered;
4355 }
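
/*
 * Consumers can split the vendor_err value packed above back into its
 * hardware halves; a small illustrative decode:
 *
 *	u16 major = entry->vendor_err >> 16;
 *	u16 minor = entry->vendor_err & 0xffff;
 */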
4356 
4357 /**
4358  * irdma_poll_one - poll one entry of the CQ
4359  * @ukcq: ukcq to poll
4360  * @cur_cqe: current CQE info to be filled in
4361  * @entry: ib_wc object to be filled in
4362  *
4363  * Returns the internal irdma device error code or 0 on success
4364  */
4365 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
4366 				 struct irdma_cq_poll_info *cur_cqe,
4367 				 struct ib_wc *entry)
4368 {
4369 	int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
4370 
4371 	if (ret)
4372 		return ret;
4373 
4374 	irdma_process_cqe(entry, cur_cqe);
4375 
4376 	return 0;
4377 }
4378 
4379 /**
4380  * __irdma_poll_cq - poll cq for completion (kernel apps)
4381  * @iwcq: cq to poll
4382  * @num_entries: number of entries to poll
4383  * @entry: array of ib_wc objects to be filled in with completions
4384  */
4385 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
4386 {
4387 	struct list_head *tmp_node, *list_node;
4388 	struct irdma_cq_buf *last_buf = NULL;
4389 	struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
4390 	struct irdma_cq_buf *cq_buf;
4391 	int ret;
4392 	struct irdma_device *iwdev;
4393 	struct irdma_cq_uk *ukcq;
4394 	bool cq_new_cqe = false;
4395 	int resized_bufs = 0;
4396 	int npolled = 0;
4397 
4398 	iwdev = to_iwdev(iwcq->ibcq.device);
4399 	ukcq = &iwcq->sc_cq.cq_uk;
4400 
4401 	/* go through the list of previously resized CQ buffers */
4402 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
4403 		cq_buf = container_of(list_node, struct irdma_cq_buf, list);
4404 		while (npolled < num_entries) {
4405 			ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
4406 			if (!ret) {
4407 				++npolled;
4408 				cq_new_cqe = true;
4409 				continue;
4410 			}
4411 			if (ret == -ENOENT)
4412 				break;
4413 			/* QP using the CQ is destroyed. Skip reporting this CQE */
4414 			if (ret == -EFAULT) {
4415 				cq_new_cqe = true;
4416 				continue;
4417 			}
4418 			goto error;
4419 		}
4420 
4421 		/* save the resized CQ buffer which received the last cqe */
4422 		if (cq_new_cqe)
4423 			last_buf = cq_buf;
4424 		cq_new_cqe = false;
4425 	}
4426 
4427 	/* check the current CQ for new cqes */
4428 	while (npolled < num_entries) {
4429 		ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
4430 		if (ret == -ENOENT) {
4431 			ret = irdma_generated_cmpls(iwcq, cur_cqe);
4432 			if (!ret)
4433 				irdma_process_cqe(entry + npolled, cur_cqe);
4434 		}
4435 		if (!ret) {
4436 			++npolled;
4437 			cq_new_cqe = true;
4438 			continue;
4439 		}
4440 
4441 		if (ret == -ENOENT)
4442 			break;
4443 		/* QP using the CQ is destroyed. Skip reporting this CQE */
4444 		if (ret == -EFAULT) {
4445 			cq_new_cqe = true;
4446 			continue;
4447 		}
4448 		goto error;
4449 	}
4450 
4451 	if (cq_new_cqe)
4452 		/* all previous CQ resizes are complete */
4453 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
4454 	else if (last_buf)
4455 		/* only CQ resizes up to the last_buf are complete */
4456 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
4457 	if (resized_bufs)
4458 		/* report to the HW the number of complete CQ resizes */
4459 		irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
4460 
4461 	return npolled;
4462 error:
4463 	ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
4464 		  __func__, ret);
4465 
4466 	return ret;
4467 }
4468 
4469 /**
4470  * irdma_poll_cq - poll cq for completion (kernel apps)
4471  * @ibcq: cq to poll
4472  * @num_entries: number of entries to poll
4473  * @entry: array of ib_wc objects to be filled in with completions
4474  */
4475 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
4476 			 struct ib_wc *entry)
4477 {
4478 	struct irdma_cq *iwcq;
4479 	unsigned long flags;
4480 	int ret;
4481 
4482 	iwcq = to_iwcq(ibcq);
4483 
4484 	spin_lock_irqsave(&iwcq->lock, flags);
4485 	ret = __irdma_poll_cq(iwcq, num_entries, entry);
4486 	spin_unlock_irqrestore(&iwcq->lock, flags);
4487 
4488 	return ret;
4489 }
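
/*
 * A typical kernel consumer loop over this entry point (a sketch;
 * 'cq' is assumed and handle_wc() is a hypothetical helper):
 *
 *	struct ib_wc wc[8];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */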
4490 
4491 /**
4492  * irdma_req_notify_cq - arm cq kernel application
4493  * @ibcq: cq to arm
4494  * @notify_flags: notification flags
4495  */
4496 static int irdma_req_notify_cq(struct ib_cq *ibcq,
4497 			       enum ib_cq_notify_flags notify_flags)
4498 {
4499 	struct irdma_cq *iwcq;
4500 	struct irdma_cq_uk *ukcq;
4501 	unsigned long flags;
4502 	enum irdma_cmpl_notify cq_notify;
4503 	bool promo_event = false;
4504 	int ret = 0;
4505 
4506 	cq_notify = notify_flags == IB_CQ_SOLICITED ?
4507 		    IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
4508 	iwcq = to_iwcq(ibcq);
4509 	ukcq = &iwcq->sc_cq.cq_uk;
4510 
4511 	spin_lock_irqsave(&iwcq->lock, flags);
4512 	/* Only promote to arm the CQ for any event if the last arm event was solicited. */
4513 	if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
4514 		promo_event = true;
4515 
4516 	if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
4517 		iwcq->last_notify = cq_notify;
4518 		irdma_uk_cq_request_notification(ukcq, cq_notify);
4519 	}
4520 
4521 	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
4522 	    (!irdma_uk_cq_empty(ukcq) || !list_empty(&iwcq->cmpl_generated)))
4523 		ret = 1;
4524 	spin_unlock_irqrestore(&iwcq->lock, flags);
4525 
4526 	return ret;
4527 }
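
/*
 * The IB_CQ_REPORT_MISSED_EVENTS return of 1 above enables the standard
 * race-free arm sequence; a hedged consumer sketch:
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto repoll;	// CQEs arrived before arming completed
 */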
4528 
4529 static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
4530 	/* gen1 - 32-bit */
4531 	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name		= "ip4InDiscards",
4532 	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name		= "ip4InTruncatedPkts",
4533 	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name		= "ip4OutNoRoutes",
4534 	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name		= "ip6InDiscards",
4535 	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name		= "ip6InTruncatedPkts",
4536 	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name		= "ip6OutNoRoutes",
4537 	[IRDMA_HW_STAT_INDEX_RXVLANERR].name		= "rxVlanErrors",
4538 	/* gen1 - 64-bit */
4539 	[IRDMA_HW_STAT_INDEX_IP4RXOCTS].name		= "ip4InOctets",
4540 	[IRDMA_HW_STAT_INDEX_IP4RXPKTS].name		= "ip4InPkts",
4541 	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name		= "ip4InReasmRqd",
4542 	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name		= "ip4InMcastPkts",
4543 	[IRDMA_HW_STAT_INDEX_IP4TXOCTS].name		= "ip4OutOctets",
4544 	[IRDMA_HW_STAT_INDEX_IP4TXPKTS].name		= "ip4OutPkts",
4545 	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name		= "ip4OutSegRqd",
4546 	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name		= "ip4OutMcastPkts",
4547 	[IRDMA_HW_STAT_INDEX_IP6RXOCTS].name		= "ip6InOctets",
4548 	[IRDMA_HW_STAT_INDEX_IP6RXPKTS].name		= "ip6InPkts",
4549 	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name		= "ip6InReasmRqd",
4550 	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name		= "ip6InMcastPkts",
4551 	[IRDMA_HW_STAT_INDEX_IP6TXOCTS].name		= "ip6OutOctets",
4552 	[IRDMA_HW_STAT_INDEX_IP6TXPKTS].name		= "ip6OutPkts",
4553 	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name		= "ip6OutSegRqd",
4554 	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name		= "ip6OutMcastPkts",
4555 	[IRDMA_HW_STAT_INDEX_RDMARXRDS].name		= "InRdmaReads",
4556 	[IRDMA_HW_STAT_INDEX_RDMARXSNDS].name		= "InRdmaSends",
4557 	[IRDMA_HW_STAT_INDEX_RDMARXWRS].name		= "InRdmaWrites",
4558 	[IRDMA_HW_STAT_INDEX_RDMATXRDS].name		= "OutRdmaReads",
4559 	[IRDMA_HW_STAT_INDEX_RDMATXSNDS].name		= "OutRdmaSends",
4560 	[IRDMA_HW_STAT_INDEX_RDMATXWRS].name		= "OutRdmaWrites",
4561 	[IRDMA_HW_STAT_INDEX_RDMAVBND].name		= "RdmaBnd",
4562 	[IRDMA_HW_STAT_INDEX_RDMAVINV].name		= "RdmaInv",
4563 
4564 	/* gen2 - 32-bit */
4565 	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name	= "cnpHandled",
4566 	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name	= "cnpIgnored",
4567 	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name		= "cnpSent",
4568 	/* gen2 - 64-bit */
4569 	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name		= "ip4InMcastOctets",
4570 	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name		= "ip4OutMcastOctets",
4571 	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name		= "ip6InMcastOctets",
4572 	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name		= "ip6OutMcastOctets",
4573 	[IRDMA_HW_STAT_INDEX_UDPRXPKTS].name		= "RxUDP",
4574 	[IRDMA_HW_STAT_INDEX_UDPTXPKTS].name		= "TxUDP",
4575 	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name	= "RxECNMrkd",
4576 	[IRDMA_HW_STAT_INDEX_TCPRTXSEG].name		= "RetransSegs",
4577 	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name		= "InOptErrors",
4578 	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name	= "InProtoErrors",
4579 	[IRDMA_HW_STAT_INDEX_TCPRXSEGS].name		= "InSegs",
4580 	[IRDMA_HW_STAT_INDEX_TCPTXSEG].name		= "OutSegs",
4581 
4582 	/* gen3 */
4583 	[IRDMA_HW_STAT_INDEX_RNR_SENT].name		= "RNR sent",
4584 	[IRDMA_HW_STAT_INDEX_RNR_RCVD].name		= "RNR received",
4585 	[IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT].name	= "ord limit count",
4586 	[IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT].name	= "ird limit count",
4587 	[IRDMA_HW_STAT_INDEX_RDMARXATS].name		= "Rx atomics",
4588 	[IRDMA_HW_STAT_INDEX_RDMATXATS].name		= "Tx atomics",
4589 	[IRDMA_HW_STAT_INDEX_NAKSEQERR].name		= "Nak Sequence Error",
4590 	[IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED].name	= "Nak Sequence Error Implied",
4591 	[IRDMA_HW_STAT_INDEX_RTO].name			= "RTO",
4592 	[IRDMA_HW_STAT_INDEX_RXOOOPKTS].name		= "Rcvd Out of order packets",
4593 	[IRDMA_HW_STAT_INDEX_ICRCERR].name		= "CRC errors",
4594 };
4595 
4596 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
4597 				     struct ib_port_immutable *immutable)
4598 {
4599 	struct ib_port_attr attr;
4600 	int err;
4601 
4602 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
4603 	err = ib_query_port(ibdev, port_num, &attr);
4604 	if (err)
4605 		return err;
4606 
4607 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
4608 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
4609 	immutable->gid_tbl_len = attr.gid_tbl_len;
4610 
4611 	return 0;
4612 }
4613 
4614 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
4615 				   struct ib_port_immutable *immutable)
4616 {
4617 	struct ib_port_attr attr;
4618 	int err;
4619 
4620 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
4621 	err = ib_query_port(ibdev, port_num, &attr);
4622 	if (err)
4623 		return err;
4624 	immutable->gid_tbl_len = attr.gid_tbl_len;
4625 
4626 	return 0;
4627 }
4628 
4629 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
4630 {
4631 	struct irdma_device *iwdev = to_iwdev(dev);
4632 
4633 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
4634 		 irdma_fw_major_ver(&iwdev->rf->sc_dev),
4635 		 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
4636 }
4637 
4638 /**
4639  * irdma_alloc_hw_port_stats - Allocate a hw stats structure
4640  * @ibdev: device pointer from stack
4641  * @port_num: port number
4642  */
4643 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
4644 						       u32 port_num)
4645 {
4646 	struct irdma_device *iwdev = to_iwdev(ibdev);
4647 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
4649 	int num_counters = dev->hw_attrs.max_stat_idx;
4650 	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
4651 
4652 	return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
4653 					  lifespan);
4654 }
4655 
4656 /**
4657  * irdma_get_hw_stats - Populates the rdma_hw_stats structure
4658  * @ibdev: device pointer from stack
4659  * @stats: stats pointer from stack
4660  * @port_num: port number
4661  * @index: which hw counter the stack is requesting we update
4662  */
4663 static int irdma_get_hw_stats(struct ib_device *ibdev,
4664 			      struct rdma_hw_stats *stats, u32 port_num,
4665 			      int index)
4666 {
4667 	struct irdma_device *iwdev = to_iwdev(ibdev);
4668 	struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
4669 
4670 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
4671 		irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
4672 	else
4673 		irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
4674 
4675 	memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
4676 
4677 	return stats->num_counters;
4678 }
4679 
4680 /**
4681  * irdma_query_gid - Query port GID
4682  * @ibdev: device pointer from stack
4683  * @port: port number
4684  * @index: Entry index
4685  * @gid: Global ID
4686  */
4687 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
4688 			   union ib_gid *gid)
4689 {
4690 	struct irdma_device *iwdev = to_iwdev(ibdev);
4691 
4692 	memset(gid->raw, 0, sizeof(gid->raw));
4693 	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
4694 
4695 	return 0;
4696 }
4697 
4698 /**
4699  * mcast_list_add - Add a new mcast item to the list
4700  * @rf: RDMA PCI function
4701  * @new_elem: pointer to element to add
4702  */
4703 static void mcast_list_add(struct irdma_pci_f *rf,
4704 			   struct mc_table_list *new_elem)
4705 {
4706 	list_add(&new_elem->list, &rf->mc_qht_list.list);
4707 }
4708 
4709 /**
4710  * mcast_list_del - Remove an mcast item from the list
4711  * @mc_qht_elem: pointer to mcast table list element
4712  */
4713 static void mcast_list_del(struct mc_table_list *mc_qht_elem)
4714 {
4715 	if (mc_qht_elem)
4716 		list_del(&mc_qht_elem->list);
4717 }
4718 
4719 /**
4720  * mcast_list_lookup_ip - Search mcast list for address
4721  * @rf: RDMA PCI function
4722  * @ip_mcast: pointer to mcast IP address
4723  */
4724 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
4725 						  u32 *ip_mcast)
4726 {
4727 	struct mc_table_list *mc_qht_el;
4728 	struct list_head *pos, *q;
4729 
4730 	list_for_each_safe(pos, q, &rf->mc_qht_list.list) {
4731 		mc_qht_el = list_entry(pos, struct mc_table_list, list);
4732 		if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
4733 			    sizeof(mc_qht_el->mc_info.dest_ip)))
4734 			return mc_qht_el;
4735 	}
4736 
4737 	return NULL;
4738 }
4739 
4740 /**
4741  * irdma_mcast_cqp_op - perform a mcast cqp operation
4742  * @iwdev: irdma device
4743  * @mc_grp_ctx: mcast group info
4744  * @op: operation
4745  *
4746  * returns error status
4747  */
4748 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
4749 			      struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
4750 {
4751 	struct cqp_cmds_info *cqp_info;
4752 	struct irdma_cqp_request *cqp_request;
4753 	int status;
4754 
4755 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
4756 	if (!cqp_request)
4757 		return -ENOMEM;
4758 
4759 	cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
4760 	cqp_info = &cqp_request->info;
4761 	cqp_info->cqp_cmd = op;
4762 	cqp_info->post_sq = 1;
4763 	cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
4764 	cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
4765 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
4766 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
4767 
4768 	return status;
4769 }
4770 
4771 /**
4772  * irdma_mcast_mac - Get the multicast MAC for an IP address
4773  * @ip_addr: IPv4 or IPv6 address
4774  * @mac: pointer to result MAC address
4775  * @ipv4: flag indicating IPv4 or IPv6
4777  */
4778 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
4779 {
4780 	u8 *ip = (u8 *)ip_addr;
4781 
4782 	if (ipv4) {
4783 		unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
4784 						0x00, 0x00};
4785 
4786 		mac4[3] = ip[2] & 0x7F;
4787 		mac4[4] = ip[1];
4788 		mac4[5] = ip[0];
4789 		ether_addr_copy(mac, mac4);
4790 	} else {
4791 		unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
4792 						0x00, 0x00};
4793 
4794 		mac6[2] = ip[3];
4795 		mac6[3] = ip[2];
4796 		mac6[4] = ip[1];
4797 		mac6[5] = ip[0];
4798 		ether_addr_copy(mac, mac6);
4799 	}
4800 }
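
/*
 * Worked example for the IPv4 branch above, on a little-endian host
 * where ip[0] is the low byte of the host-order address: group
 * 239.1.2.3 (0xef010203) maps to 01:00:5e:01:02:03, i.e. the RFC 1112
 * prefix 01:00:5e followed by the low 23 bits of the address.
 */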
4801 
4802 /**
4803  * irdma_attach_mcast - attach a qp to a multicast group
4804  * @ibqp: ptr to qp
4805  * @ibgid: pointer to global ID
4806  * @lid: local ID
4807  *
4808  * returns error status
4809  */
4810 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4811 {
4812 	struct irdma_qp *iwqp = to_iwqp(ibqp);
4813 	struct irdma_device *iwdev = iwqp->iwdev;
4814 	struct irdma_pci_f *rf = iwdev->rf;
4815 	struct mc_table_list *mc_qht_elem;
4816 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4817 	unsigned long flags;
4818 	u32 ip_addr[4] = {};
4819 	u32 mgn;
4820 	u32 no_mgs;
4821 	int ret = 0;
4822 	bool ipv4;
4823 	u16 vlan_id;
4824 	union irdma_sockaddr sgid_addr;
4825 	unsigned char dmac[ETH_ALEN];
4826 
4827 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4828 
4829 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
4830 		irdma_copy_ip_ntohl(ip_addr,
4831 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4832 		irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL);
4833 		ipv4 = false;
4834 		ibdev_dbg(&iwdev->ibdev,
4835 			  "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
4836 			  ip_addr);
4837 		irdma_mcast_mac(ip_addr, dmac, false);
4838 	} else {
4839 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4840 		ipv4 = true;
4841 		vlan_id = irdma_get_vlan_ipv4(ip_addr);
4842 		irdma_mcast_mac(ip_addr, dmac, true);
4843 		ibdev_dbg(&iwdev->ibdev,
4844 			  "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
4845 			  ibqp->qp_num, ip_addr, dmac);
4846 	}
4847 
4848 	spin_lock_irqsave(&rf->qh_list_lock, flags);
4849 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4850 	if (!mc_qht_elem) {
4851 		struct irdma_dma_mem *dma_mem_mc;
4852 
4853 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4854 		mc_qht_elem = kzalloc_obj(*mc_qht_elem);
4855 		if (!mc_qht_elem)
4856 			return -ENOMEM;
4857 
4858 		mc_qht_elem->mc_info.ipv4_valid = ipv4;
4859 		memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
4860 		       sizeof(mc_qht_elem->mc_info.dest_ip));
4861 		ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
4862 				       &mgn, &rf->next_mcg);
4863 		if (ret) {
4864 			kfree(mc_qht_elem);
4865 			return -ENOMEM;
4866 		}
4867 
4868 		mc_qht_elem->mc_info.mgn = mgn;
4869 		dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
4870 		dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
4871 					 IRDMA_HW_PAGE_SIZE);
4872 		dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
4873 						    dma_mem_mc->size,
4874 						    &dma_mem_mc->pa,
4875 						    GFP_KERNEL);
4876 		if (!dma_mem_mc->va) {
4877 			irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
4878 			kfree(mc_qht_elem);
4879 			return -ENOMEM;
4880 		}
4881 
4882 		mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
4883 		memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
4884 		       sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
4885 		mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
4886 		mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
4887 		if (vlan_id < VLAN_N_VID)
4888 			mc_qht_elem->mc_grp_ctx.vlan_valid = true;
4889 		mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
4890 		mc_qht_elem->mc_grp_ctx.qs_handle =
4891 			iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4892 		ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
4893 
4894 		spin_lock_irqsave(&rf->qh_list_lock, flags);
4895 		mcast_list_add(rf, mc_qht_elem);
4896 	} else {
4897 		if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
4898 		    IRDMA_MAX_MGS_PER_CTX) {
4899 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4900 			return -ENOMEM;
4901 		}
4902 	}
4903 
4904 	mcg_info.qp_id = iwqp->ibqp.qp_num;
4905 	no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
4906 	irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4907 	spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4908 
4909 	/* Only if there is a change do we need to modify or create */
4910 	if (!no_mgs) {
4911 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4912 					 IRDMA_OP_MC_CREATE);
4913 	} else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4914 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4915 					 IRDMA_OP_MC_MODIFY);
4916 	} else {
4917 		return 0;
4918 	}
4919 
4920 	if (ret)
4921 		goto error;
4922 
4923 	return 0;
4924 
4925 error:
4926 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4927 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4928 		mcast_list_del(mc_qht_elem);
4929 		dma_free_coherent(rf->hw.device,
4930 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4931 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4932 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4933 		mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4934 		irdma_free_rsrc(rf, rf->allocated_mcgs,
4935 				mc_qht_elem->mc_grp_ctx.mg_id);
4936 		kfree(mc_qht_elem);
4937 	}
4938 
4939 	return ret;
4940 }
4941 
4942 /**
4943  * irdma_detach_mcast - detach a qp from a multicast group
4944  * @ibqp: ptr to qp
4945  * @ibgid: pointer to global ID
4946  * @lid: local ID
4947  *
4948  * returns error status
4949  */
4950 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4951 {
4952 	struct irdma_qp *iwqp = to_iwqp(ibqp);
4953 	struct irdma_device *iwdev = iwqp->iwdev;
4954 	struct irdma_pci_f *rf = iwdev->rf;
4955 	u32 ip_addr[4] = {};
4956 	struct mc_table_list *mc_qht_elem;
4957 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4958 	int ret;
4959 	unsigned long flags;
4960 	union irdma_sockaddr sgid_addr;
4961 
4962 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4963 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
4964 		irdma_copy_ip_ntohl(ip_addr,
4965 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4966 	else
4967 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4968 
4969 	spin_lock_irqsave(&rf->qh_list_lock, flags);
4970 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4971 	if (!mc_qht_elem) {
4972 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4973 		ibdev_dbg(&iwdev->ibdev,
4974 			  "VERBS: MCG for address not found\n");
4975 		return 0;
4976 	}
4977 
4978 	mcg_info.qp_id = iwqp->ibqp.qp_num;
4979 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4980 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4981 		mcast_list_del(mc_qht_elem);
4982 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4983 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4984 					 IRDMA_OP_MC_DESTROY);
4985 		if (ret) {
4986 			ibdev_dbg(&iwdev->ibdev,
4987 				  "VERBS: failed MC_DESTROY MCG\n");
4988 			spin_lock_irqsave(&rf->qh_list_lock, flags);
4989 			mcast_list_add(rf, mc_qht_elem);
4990 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4991 			return -EAGAIN;
4992 		}
4993 
4994 		dma_free_coherent(rf->hw.device,
4995 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4996 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4997 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4998 		mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4999 		irdma_free_rsrc(rf, rf->allocated_mcgs,
5000 				mc_qht_elem->mc_grp_ctx.mg_id);
5001 		kfree(mc_qht_elem);
5002 	} else {
5003 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
5004 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
5005 					 IRDMA_OP_MC_MODIFY);
5006 		if (ret) {
5007 			ibdev_dbg(&iwdev->ibdev,
5008 				  "VERBS: failed Modify MCG\n");
5009 			return ret;
5010 		}
5011 	}
5012 
5013 	return 0;
5014 }
5015 
5016 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
5017 {
5018 	struct irdma_pci_f *rf = iwdev->rf;
5019 	int err;
5020 
5021 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
5022 			       &rf->next_ah);
5023 	if (err)
5024 		return err;
5025 
5026 	err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
5027 			      irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
5029 	if (err) {
5030 		ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
5031 		goto err_ah_create;
5032 	}
5033 
5034 	if (!sleep) {
5035 		const u64 tmout_ms = irdma_get_timeout_threshold(&rf->sc_dev) *
5036 			CQP_COMPL_WAIT_TIME_MS;
5037 
5038 		if (poll_timeout_us_atomic(irdma_cqp_ce_handler(rf,
5039 								&rf->ccq.sc_cq),
5040 					   ah->sc_ah.ah_info.ah_valid, 1,
5041 					   tmout_ms * USEC_PER_MSEC, false)) {
5042 			ibdev_dbg(&iwdev->ibdev,
5043 				  "VERBS: CQP create AH timed out");
5044 			err = -ETIMEDOUT;
5045 			goto err_ah_create;
5046 		}
5047 	}
5048 	return 0;
5049 
5050 err_ah_create:
5051 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
5052 
5053 	return err;
5054 }
5055 
5056 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
5057 {
5058 	struct irdma_pd *pd = to_iwpd(ibah->pd);
5059 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
5060 	struct rdma_ah_attr *ah_attr = attr->ah_attr;
5061 	const struct ib_gid_attr *sgid_attr;
5062 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
5063 	struct irdma_pci_f *rf = iwdev->rf;
5064 	struct irdma_sc_ah *sc_ah;
5065 	struct irdma_ah_info *ah_info;
5066 	union irdma_sockaddr sgid_addr, dgid_addr;
5067 	int err;
5068 	u8 dmac[ETH_ALEN];
5069 
5070 	ah->pd = pd;
5071 	sc_ah = &ah->sc_ah;
5072 	sc_ah->ah_info.vsi = &iwdev->vsi;
5073 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
5074 	ah->sgid_index = ah_attr->grh.sgid_index;
5075 	sgid_attr = ah_attr->grh.sgid_attr;
5076 	memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
5077 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
5078 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
5079 	ah->av.attrs = *ah_attr;
5080 	ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
5081 	ah_info = &sc_ah->ah_info;
5082 	ah_info->pd_idx = pd->sc_pd.pd_id;
5083 	if (ah_attr->ah_flags & IB_AH_GRH) {
5084 		ah_info->flow_label = ah_attr->grh.flow_label;
5085 		ah_info->hop_ttl = ah_attr->grh.hop_limit;
5086 		ah_info->tc_tos = ah_attr->grh.traffic_class;
5087 	}
5088 
5089 	ether_addr_copy(dmac, ah_attr->roce.dmac);
5090 	if (ah->av.net_type == RDMA_NETWORK_IPV4) {
5091 		ah_info->ipv4_valid = true;
5092 		ah_info->dest_ip_addr[0] =
5093 			ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
5094 		ah_info->src_ip_addr[0] =
5095 			ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
5096 		ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
5097 						     ah_info->dest_ip_addr[0]);
5098 		if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
5099 			ah_info->do_lpbk = true;
5100 			irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
5101 		}
5102 	} else {
5103 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
5104 				    dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
5105 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
5106 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
5107 		ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
5108 						     ah_info->dest_ip_addr);
5109 		if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
5110 			ah_info->do_lpbk = true;
5111 			irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
5112 		}
5113 	}
5114 
5115 	err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
5116 				      ah_info->mac_addr);
5117 	if (err)
5118 		return err;
5119 
5120 	ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
5121 					      ah_info->ipv4_valid, dmac);
5122 
5123 	if (ah_info->dst_arpindex == -1)
5124 		return -EINVAL;
5125 
5126 	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
5127 		ah_info->vlan_tag = 0;
5128 
5129 	if (ah_info->vlan_tag < VLAN_N_VID) {
5130 		u8 prio = rt_tos2priority(ah_info->tc_tos);
5131 
5132 		prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
5133 
5134 		ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;
5135 		ah_info->insert_vlan_tag = true;
5136 	}
5137 
5138 	return 0;
5139 }
5140 
5141 /**
5142  * irdma_ah_exists - Check for existing identical AH
5143  * @iwdev: irdma device
5144  * @new_ah: AH to check for
5145  *
5146  * returns true if AH is found, false if not found.
5147  */
5148 static bool irdma_ah_exists(struct irdma_device *iwdev,
5149 			    struct irdma_ah *new_ah)
5150 {
5151 	struct irdma_ah *ah;
5152 	u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
5153 		  new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
5154 		  new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
5155 		  new_ah->sc_ah.ah_info.dest_ip_addr[3];
5156 
5157 	hash_for_each_possible(iwdev->rf->ah_hash_tbl, ah, list, key) {
5158 		/* Set ah_valid and ah_idx the same so memcmp can work */
5159 		new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
5160 		new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
5161 		if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
5162 			    sizeof(ah->sc_ah.ah_info))) {
5163 			refcount_inc(&ah->refcnt);
5164 			new_ah->parent_ah = ah;
5165 			return true;
5166 		}
5167 	}
5168 
5169 	return false;
5170 }
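
/*
 * The hash key above is simply the XOR of the four destination-address
 * words, so an IPv4 destination (one word populated) hashes on the
 * address itself: for 192.0.2.1 in dest_ip_addr[0] the key is
 * 0xc0000201 with the remaining words zero.
 */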
5171 
5172 /**
5173  * irdma_destroy_ah - Destroy address handle
5174  * @ibah: pointer to address handle
5175  * @ah_flags: flags for sleepable
5176  */
5177 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
5178 {
5179 	struct irdma_device *iwdev = to_iwdev(ibah->device);
5180 	struct irdma_ah *ah = to_iwah(ibah);
5181 
5182 	if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
5183 		mutex_lock(&iwdev->rf->ah_tbl_lock);
5184 		if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
5185 			mutex_unlock(&iwdev->rf->ah_tbl_lock);
5186 			return 0;
5187 		}
5188 		hash_del(&ah->parent_ah->list);
5189 		kfree(ah->parent_ah);
5190 		mutex_unlock(&iwdev->rf->ah_tbl_lock);
5191 	}
5192 
5193 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
5194 			false, NULL, ah);
5195 
5196 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
5197 			ah->sc_ah.ah_info.ah_idx);
5198 
5199 	return 0;
5200 }
5201 
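/*
 * A condensed sketch of the release pattern above, with hypothetical
 * types: the reference is dropped under the table lock so a concurrent
 * irdma_ah_exists() cannot take a new reference on an entry that is
 * about to be unhashed and freed.
 */
#if 0
struct example_entry {
	struct hlist_node node;
	refcount_t refcnt;
};

static void example_put_cached_entry(struct mutex *tbl_lock,
				     struct example_entry *e)
{
	mutex_lock(tbl_lock);
	if (refcount_dec_and_test(&e->refcnt)) {
		hash_del(&e->node);
		kfree(e);
	}
	mutex_unlock(tbl_lock);
}
#endif
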
5202 /**
5203  * irdma_create_user_ah - create user address handle
5204  * @ibah: address handle
5205  * @attr: address handle attributes
5206  * @udata: User data
5207  *
5208  * returns 0 on success, error otherwise
5209  */
5210 static int irdma_create_user_ah(struct ib_ah *ibah,
5211 				struct rdma_ah_init_attr *attr,
5212 				struct ib_udata *udata)
5213 {
5214 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
5215 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
5216 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
5217 	struct irdma_create_ah_resp uresp = {};
5218 	struct irdma_ah *parent_ah;
5219 	int err;
5220 
5221 	if (udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
5222 		return -EINVAL;
5223 
5224 	err = irdma_setup_ah(ibah, attr);
5225 	if (err)
5226 		return err;
5227 	mutex_lock(&iwdev->rf->ah_tbl_lock);
5228 	if (!irdma_ah_exists(iwdev, ah)) {
5229 		err = irdma_create_hw_ah(iwdev, ah, true);
5230 		if (err) {
5231 			mutex_unlock(&iwdev->rf->ah_tbl_lock);
5232 			return err;
5233 		}
5234 		/* Add new AH to list */
5235 		parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
5236 		if (parent_ah) {
5237 			u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
5238 				  parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
5239 				  parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
5240 				  parent_ah->sc_ah.ah_info.dest_ip_addr[3];
5241 
5242 			ah->parent_ah = parent_ah;
5243 			hash_add(iwdev->rf->ah_hash_tbl, &parent_ah->list, key);
5244 			refcount_set(&parent_ah->refcnt, 1);
5245 		}
5246 	}
5247 	mutex_unlock(&iwdev->rf->ah_tbl_lock);
5248 
5249 	uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
5250 	err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
5251 	if (err)
5252 		irdma_destroy_ah(ibah, attr->flags);
5253 
5254 	return err;
5255 }
5256 
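/*
 * An illustrative userspace sketch (libibverbs) of the path that lands
 * in irdma_create_user_ah(): ibv_create_ah() traps into the kernel,
 * where identical handles are deduplicated via ah_hash_tbl above. The
 * helper name, GID index and hop limit are assumptions for the example.
 */
#if 0
#include <infiniband/verbs.h>

struct ibv_ah *example_create_roce_ah(struct ibv_pd *pd,
				      const union ibv_gid *dgid)
{
	struct ibv_ah_attr attr = {
		.is_global	= 1,	/* RoCEv2 traffic carries a GRH */
		.port_num	= 1,
		.grh = {
			.dgid		= *dgid,
			.sgid_index	= 0,
			.hop_limit	= 64,
		},
	};

	return ibv_create_ah(pd, &attr);	/* NULL on failure */
}
#endif
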
5257 /**
5258  * irdma_create_ah - create address handle
5259  * @ibah: address handle
5260  * @attr: address handle attributes
5261  * @udata: user data (always NULL on this kernel-client entry point)
5262  *
5263  * returns 0 on success, error otherwise
5264  */
5265 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
5266 			   struct ib_udata *udata)
5267 {
5268 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
5269 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
5270 	int err;
5271 
5272 	err = irdma_setup_ah(ibah, attr);
5273 	if (err)
5274 		return err;
5275 	err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
5276 
5277 	return err;
5278 }
5279 
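/*
 * An illustrative kernel-consumer sketch: this entry point is reached
 * through the core's rdma_create_ah()/rdma_destroy_ah(), e.g. from a
 * ULP; attribute setup is elided and the function name is hypothetical.
 */
#if 0
static int example_kernel_ah_roundtrip(struct ib_pd *pd,
				       struct rdma_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	return rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
}
#endif
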
5280 /**
5281  * irdma_query_ah - Query address handle
5282  * @ibah: pointer to address handle
5283  * @ah_attr: address handle attributes
5284  */
5285 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
5286 {
5287 	struct irdma_ah *ah = to_iwah(ibah);
5288 
5289 	memset(ah_attr, 0, sizeof(*ah_attr));
5290 	if (ah->av.attrs.ah_flags & IB_AH_GRH) {
5291 		ah_attr->ah_flags = IB_AH_GRH;
5292 		ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
5293 		ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
5294 		ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
5295 		ah_attr->grh.sgid_index = ah->sgid_index;
5296 		memcpy(&ah_attr->grh.dgid, &ah->dgid,
5297 		       sizeof(ah_attr->grh.dgid));
5298 	}
5299 
5300 	return 0;
5301 }
5302 
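/*
 * An illustrative consumer sketch: rdma_query_ah() lands in
 * irdma_query_ah() above, which reports only GRH fields, so callers
 * should test ah_flags first. The function name is hypothetical.
 */
#if 0
static void example_dump_ah(struct ib_ah *ah)
{
	struct rdma_ah_attr attr = {};

	if (rdma_query_ah(ah, &attr))
		return;
	if (attr.ah_flags & IB_AH_GRH)
		pr_info("hop_limit %u sgid_index %u\n",
			attr.grh.hop_limit, attr.grh.sgid_index);
}
#endif
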
5303 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
5304 						 u32 port_num)
5305 {
5306 	return IB_LINK_LAYER_ETHERNET;
5307 }
5308 
5309 static const struct ib_device_ops irdma_gen1_dev_ops = {
5310 	.dealloc_driver = irdma_ib_dealloc_device,
5311 };
5312 
5313 static const struct ib_device_ops irdma_gen3_dev_ops = {
5314 	.alloc_mw = irdma_alloc_mw,
5315 	.create_srq = irdma_create_srq,
5316 	.dealloc_mw = irdma_dealloc_mw,
5317 	.destroy_srq = irdma_destroy_srq,
5318 	.modify_srq = irdma_modify_srq,
5319 	.post_srq_recv = irdma_post_srq_recv,
5320 	.query_srq = irdma_query_srq,
5321 };
5322 
5323 static const struct ib_device_ops irdma_roce_dev_ops = {
5324 	.attach_mcast = irdma_attach_mcast,
5325 	.create_ah = irdma_create_ah,
5326 	.create_user_ah = irdma_create_user_ah,
5327 	.destroy_ah = irdma_destroy_ah,
5328 	.detach_mcast = irdma_detach_mcast,
5329 	.get_link_layer = irdma_get_link_layer,
5330 	.get_port_immutable = irdma_roce_port_immutable,
5331 	.modify_qp = irdma_modify_qp_roce,
5332 	.query_ah = irdma_query_ah,
5333 	.query_pkey = irdma_query_pkey,
5334 };
5335 
5336 static const struct ib_device_ops irdma_iw_dev_ops = {
5337 	.get_port_immutable = irdma_iw_port_immutable,
5338 	.iw_accept = irdma_accept,
5339 	.iw_add_ref = irdma_qp_add_ref,
5340 	.iw_connect = irdma_connect,
5341 	.iw_create_listen = irdma_create_listen,
5342 	.iw_destroy_listen = irdma_destroy_listen,
5343 	.iw_get_qp = irdma_get_qp,
5344 	.iw_reject = irdma_reject,
5345 	.iw_rem_ref = irdma_qp_rem_ref,
5346 	.modify_qp = irdma_modify_qp,
5347 	.query_gid = irdma_query_gid,
5348 };
5349 
5350 static const struct ib_device_ops irdma_dev_ops = {
5351 	.owner = THIS_MODULE,
5352 	.driver_id = RDMA_DRIVER_IRDMA,
5353 	.uverbs_abi_ver = IRDMA_ABI_VER,
5354 
5355 	.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
5356 	.alloc_mr = irdma_alloc_mr,
5357 	.alloc_pd = irdma_alloc_pd,
5358 	.alloc_ucontext = irdma_alloc_ucontext,
5359 	.create_cq = irdma_create_cq,
5360 	.create_qp = irdma_create_qp,
5361 	.dealloc_driver = irdma_ib_dealloc_device,
5362 	.dealloc_mw = irdma_dealloc_mw,
5363 	.dealloc_pd = irdma_dealloc_pd,
5364 	.dealloc_ucontext = irdma_dealloc_ucontext,
5365 	.dereg_mr = irdma_dereg_mr,
5366 	.destroy_cq = irdma_destroy_cq,
5367 	.destroy_qp = irdma_destroy_qp,
5368 	.disassociate_ucontext = irdma_disassociate_ucontext,
5369 	.get_dev_fw_str = irdma_get_dev_fw_str,
5370 	.get_dma_mr = irdma_get_dma_mr,
5371 	.get_hw_stats = irdma_get_hw_stats,
5372 	.map_mr_sg = irdma_map_mr_sg,
5373 	.mmap = irdma_mmap,
5374 	.mmap_free = irdma_mmap_free,
5375 	.poll_cq = irdma_poll_cq,
5376 	.post_recv = irdma_post_recv,
5377 	.post_send = irdma_post_send,
5378 	.query_device = irdma_query_device,
5379 	.query_port = irdma_query_port,
5380 	.query_qp = irdma_query_qp,
5381 	.reg_user_mr = irdma_reg_user_mr,
5382 	.reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
5383 	.rereg_user_mr = irdma_rereg_user_mr,
5384 	.req_notify_cq = irdma_req_notify_cq,
5385 	.resize_cq = irdma_resize_cq,
5386 	INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
5387 	INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
5388 	INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
5389 	INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
5390 	INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
5391 	INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
5392 	INIT_RDMA_OBJ_SIZE(ib_srq, irdma_srq, ibsrq),
5393 };
5394 
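/*
 * A minimal sketch of what the INIT_RDMA_OBJ_SIZE() entries above
 * convey: the core allocates the full driver structure and operates on
 * the embedded ib_* member, which the driver recovers with
 * container_of(). Struct and helper names are hypothetical.
 */
#if 0
struct example_pd {
	struct ib_pd ibpd;	/* embedded core object, handed to verbs */
	u32 pd_id;		/* driver-private state follows */
};

static inline struct example_pd *to_example_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct example_pd, ibpd);
}
#endif
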
5395 /**
5396  * irdma_init_roce_device - initialization of roce rdma device
5397  * @iwdev: irdma device
5398  */
5399 static void irdma_init_roce_device(struct irdma_device *iwdev)
5400 {
5401 	iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
5402 	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
5403 			    iwdev->netdev->dev_addr);
5404 	ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
5405 }
5406 
5407 /**
5408  * irdma_init_iw_device - initialization of iwarp rdma device
5409  * @iwdev: irdma device
5410  */
5411 static void irdma_init_iw_device(struct irdma_device *iwdev)
5412 {
5413 	struct net_device *netdev = iwdev->netdev;
5414 
5415 	iwdev->ibdev.node_type = RDMA_NODE_RNIC;
5416 	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
5417 			    netdev->dev_addr);
5418 	memcpy(iwdev->ibdev.iw_ifname, netdev->name,
5419 	       sizeof(iwdev->ibdev.iw_ifname));
5420 	ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
5421 }
5422 
5423 /**
5424  * irdma_init_rdma_device - initialization of rdma device
5425  * @iwdev: irdma device
5426  */
5427 static void irdma_init_rdma_device(struct irdma_device *iwdev)
5428 {
5429 	struct pci_dev *pcidev = iwdev->rf->pcidev;
5430 
5431 	if (iwdev->roce_mode)
5432 		irdma_init_roce_device(iwdev);
5433 	else
5434 		irdma_init_iw_device(iwdev);
5435 
5436 	iwdev->ibdev.phys_port_cnt = 1;
5437 	iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
5438 	iwdev->ibdev.dev.parent = &pcidev->dev;
5439 	ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
5440 	if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
5441 		ib_set_device_ops(&iwdev->ibdev, &irdma_gen1_dev_ops);
5442 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_3)
5443 		ib_set_device_ops(&iwdev->ibdev, &irdma_gen3_dev_ops);
5444 }
5445 
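/*
 * A note on the ordering above, in sketch form, assuming the core's
 * first-setter-wins semantics for ib_set_device_ops(): ops that are
 * already set are not overwritten, so the protocol table installed
 * first (RoCE or iWARP) keeps precedence where it overlaps the generic
 * and per-generation tables. Table names below are hypothetical.
 */
#if 0
	ib_set_device_ops(&iwdev->ibdev, &protocol_ops);  /* first: wins on overlap */
	ib_set_device_ops(&iwdev->ibdev, &generic_ops);   /* fills the remaining ops */
#endif
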
5446 /**
5447  * irdma_port_ibevent - indicate port event
5448  * @iwdev: irdma device
5449  */
5450 void irdma_port_ibevent(struct irdma_device *iwdev)
5451 {
5452 	struct ib_event event;
5453 
5454 	event.device = &iwdev->ibdev;
5455 	event.element.port_num = 1;
5456 	event.event =
5457 		iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
5458 	ib_dispatch_event(&event);
5459 }
5460 
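/*
 * An illustrative consumer-side sketch: a kernel client can observe the
 * port events dispatched above by registering a handler with the core.
 * The handler name is hypothetical.
 */
#if 0
static void example_event_handler(struct ib_event_handler *handler,
				  struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_PORT_ERR)
		pr_info("port %u state changed\n", event->element.port_num);
}

/* registration, e.g. at client attach time:
 *	INIT_IB_EVENT_HANDLER(&handler, ibdev, example_event_handler);
 *	ib_register_event_handler(&handler);
 */
#endif
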
5461 /**
5462  * irdma_ib_unregister_device - unregister rdma device from IB core
5463  *
5464  * @iwdev: irdma device
5465  */
5466 void irdma_ib_unregister_device(struct irdma_device *iwdev)
5467 {
5468 	iwdev->iw_status = 0;
5469 	irdma_port_ibevent(iwdev);
5470 	ib_unregister_device(&iwdev->ibdev);
5471 }
5472 
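/*
 * A note in sketch form: ib_unregister_device() above ends up invoking
 * the .dealloc_driver op registered in irdma_dev_ops, i.e.
 * irdma_ib_dealloc_device() below, so no explicit HW teardown call is
 * needed at this site.
 */
#if 0
	ib_unregister_device(&iwdev->ibdev);
	/* core then calls ibdev->ops.dealloc_driver(ibdev) */
#endif
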
5473 /**
5474  * irdma_ib_register_device - register irdma device to IB core
5475  * @iwdev: irdma device
5476  */
5477 int irdma_ib_register_device(struct irdma_device *iwdev)
5478 {
5479 	int ret;
5480 
5481 	irdma_init_rdma_device(iwdev);
5482 
5483 	ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
5484 	if (ret)
5485 		goto error;
5486 	dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
5487 	ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
5488 	if (ret)
5489 		goto error;
5490 
5491 	iwdev->iw_status = 1;
5492 	irdma_port_ibevent(iwdev);
5493 
5494 	return 0;
5495 
5496 error:
5497 	if (ret)
5498 		ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device failed\n");
5499 
5500 	return ret;
5501 }
5502 
5503 /**
5504  * irdma_ib_dealloc_device - deallocate device resources
5505  * @ibdev: ib device
5506  *
5507  * callback from ibdev dealloc_driver to deallocate resources
5508  * under the irdma device
5509  */
5510 void irdma_ib_dealloc_device(struct ib_device *ibdev)
5511 {
5512 	struct irdma_device *iwdev = to_iwdev(ibdev);
5513 
5514 	irdma_rt_deinit_hw(iwdev);
5515 	if (!iwdev->is_vport) {
5516 		irdma_ctrl_deinit_hw(iwdev->rf);
5517 		if (iwdev->rf->vchnl_wq) {
5518 			destroy_workqueue(iwdev->rf->vchnl_wq);
5519 			mutex_destroy(&iwdev->rf->sc_dev.vchnl_mutex);
5520 		}
5521 	}
5522 }
5523