xref: /linux/drivers/infiniband/hw/irdma/verbs.c (revision a5210135489ae7bc1ef1cb4a8157361dd7b468cd)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4 
5 /**
6  * irdma_query_device - get device attributes
7  * @ibdev: device pointer from stack
8  * @props: returning device attributes
9  * @udata: user data
10  */
11 static int irdma_query_device(struct ib_device *ibdev,
12 			      struct ib_device_attr *props,
13 			      struct ib_udata *udata)
14 {
15 	struct irdma_device *iwdev = to_iwdev(ibdev);
16 	struct irdma_pci_f *rf = iwdev->rf;
17 	struct pci_dev *pcidev = iwdev->rf->pcidev;
18 	struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
19 
20 	if (udata->inlen || udata->outlen)
21 		return -EINVAL;
22 
23 	memset(props, 0, sizeof(*props));
24 	addrconf_addr_eui48((u8 *)&props->sys_image_guid,
25 			    iwdev->netdev->dev_addr);
26 	props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
27 			irdma_fw_minor_ver(&rf->sc_dev);
28 	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
29 				  IB_DEVICE_MEM_MGT_EXTENSIONS;
30 	if (hw_attrs->uk_attrs.hw_rev < IRDMA_GEN_3)
31 		props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
32 	props->vendor_id = pcidev->vendor;
33 	props->vendor_part_id = pcidev->device;
34 
35 	props->hw_ver = rf->pcidev->revision;
36 	props->page_size_cap = hw_attrs->page_size_cap;
37 	props->max_mr_size = hw_attrs->max_mr_size;
38 	props->max_qp = rf->max_qp - rf->used_qps;
39 	props->max_qp_wr = hw_attrs->max_qp_wr;
40 	props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
41 	props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
42 	props->max_cq = rf->max_cq - rf->used_cqs;
43 	props->max_cqe = rf->max_cqe - 1;
44 	props->max_mr = rf->max_mr - rf->used_mrs;
45 	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
46 		props->max_mw = props->max_mr;
47 	props->max_pd = rf->max_pd - rf->used_pds;
48 	props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
49 	props->max_qp_rd_atom = hw_attrs->max_hw_ird;
50 	props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
51 	if (rdma_protocol_roce(ibdev, 1)) {
52 		props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
53 		props->max_pkeys = IRDMA_PKEY_TBL_SZ;
54 	}
55 
56 	props->max_ah = rf->max_ah;
57 	props->max_mcast_grp = rf->max_mcg;
58 	props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
59 	props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
60 	props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
61 	props->max_srq = rf->max_srq - rf->used_srqs;
62 	props->max_srq_wr = IRDMA_MAX_SRQ_WRS;
63 	props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
64 	if (hw_attrs->uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
65 		props->atomic_cap = IB_ATOMIC_HCA;
66 	else
67 		props->atomic_cap = IB_ATOMIC_NONE;
68 	props->masked_atomic_cap = props->atomic_cap;
69 	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) {
70 #define HCA_CORE_CLOCK_KHZ 1000000UL
71 		props->timestamp_mask = GENMASK(31, 0);
72 		props->hca_core_clock = HCA_CORE_CLOCK_KHZ;
73 	}
74 	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
75 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
76 
77 	return 0;
78 }
79 
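/*
 * For context, a minimal libibverbs consumer sketch (assumes an open
 * struct ibv_context *ctx; not part of this file) showing how the
 * attributes filled in above surface in user space:
 *
 *	struct ibv_device_attr attr;
 *
 *	if (!ibv_query_device(ctx, &attr))
 *		printf("max_qp=%d max_mr=%d\n", attr.max_qp, attr.max_mr);
 */
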
80 /**
81  * irdma_query_port - get port attributes
82  * @ibdev: device pointer from stack
83  * @port: port number for query
84  * @props: port attributes to return
85  */
86 static int irdma_query_port(struct ib_device *ibdev, u32 port,
87 			    struct ib_port_attr *props)
88 {
89 	struct irdma_device *iwdev = to_iwdev(ibdev);
90 	struct net_device *netdev = iwdev->netdev;
91 
92 	/* no need to zero out props here; done by caller */
93 
94 	props->max_mtu = IB_MTU_4096;
95 	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
96 	props->lid = 1;
97 	props->lmc = 0;
98 	props->sm_lid = 0;
99 	props->sm_sl = 0;
100 	if (netif_carrier_ok(netdev) && netif_running(netdev)) {
101 		props->state = IB_PORT_ACTIVE;
102 		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
103 	} else {
104 		props->state = IB_PORT_DOWN;
105 		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
106 	}
107 
108 	ib_get_eth_speed(ibdev, port, &props->active_speed,
109 			 &props->active_width);
110 
111 	if (rdma_protocol_roce(ibdev, 1)) {
112 		props->gid_tbl_len = 32;
113 		props->ip_gids = true;
114 		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
115 	} else {
116 		props->gid_tbl_len = 1;
117 	}
118 	props->qkey_viol_cntr = 0;
119 	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
120 	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
121 
122 	return 0;
123 }
124 
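/*
 * Matching user-space sketch (hedged; assumes ctx as above, port 1):
 *
 *	struct ibv_port_attr pattr;
 *
 *	if (!ibv_query_port(ctx, 1, &pattr))
 *		printf("state=%d active_mtu=%d\n", pattr.state,
 *		       pattr.active_mtu);
 */
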
125 /**
126  * irdma_disassociate_ucontext - Disassociate user context
127  * @context: ib user context
128  */
129 static void irdma_disassociate_ucontext(struct ib_ucontext *context)
130 {
131 }
132 
133 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
134 			     struct vm_area_struct *vma)
135 {
136 	u64 pfn;
137 
138 	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
139 		return -EINVAL;
140 
141 	vma->vm_private_data = ucontext;
142 	pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
143 	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
144 
145 	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
146 				 pgprot_noncached(vma->vm_page_prot), NULL);
147 }
148 
149 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
150 {
151 	struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
152 
153 	kfree(entry);
154 }
155 
156 static struct rdma_user_mmap_entry *
157 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
158 			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
159 {
160 	struct irdma_user_mmap_entry *entry = kzalloc_obj(*entry);
161 	int ret;
162 
163 	if (!entry)
164 		return NULL;
165 
166 	entry->bar_offset = bar_offset;
167 	entry->mmap_flag = mmap_flag;
168 
169 	ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
170 					  &entry->rdma_entry, PAGE_SIZE);
171 	if (ret) {
172 		kfree(entry);
173 		return NULL;
174 	}
175 	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
176 
177 	return &entry->rdma_entry;
178 }
179 
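/*
 * The key returned through @mmap_offset round-trips via user space: it is
 * copied out in a uresp field (e.g. db_mmap_key below) and comes back as
 * vm_pgoff in irdma_mmap(), where rdma_user_mmap_entry_get() resolves it.
 * A hedged user-space sketch (command fd and response names illustrative):
 *
 *	void *db = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, cmd_fd,
 *			resp.db_mmap_key);
 */
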
180 /**
181  * irdma_mmap - user memory map
182  * @context: context created during alloc
183  * @vma: kernel info for user memory map
184  */
185 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
186 {
187 	struct rdma_user_mmap_entry *rdma_entry;
188 	struct irdma_user_mmap_entry *entry;
189 	struct irdma_ucontext *ucontext;
190 	u64 pfn;
191 	int ret;
192 
193 	ucontext = to_ucontext(context);
194 
195 	/* Legacy support for libi40iw with hard-coded mmap key */
196 	if (ucontext->legacy_mode)
197 		return irdma_mmap_legacy(ucontext, vma);
198 
199 	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
200 	if (!rdma_entry) {
201 		ibdev_dbg(&ucontext->iwdev->ibdev,
202 			  "VERBS: pgoff[0x%lx] does not have valid entry\n",
203 			  vma->vm_pgoff);
204 		return -EINVAL;
205 	}
206 
207 	entry = to_irdma_mmap_entry(rdma_entry);
208 	ibdev_dbg(&ucontext->iwdev->ibdev,
209 		  "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
210 		  entry->bar_offset, entry->mmap_flag);
211 
212 	pfn = (entry->bar_offset +
213 	      pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
214 
215 	switch (entry->mmap_flag) {
216 	case IRDMA_MMAP_IO_NC:
217 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
218 					pgprot_noncached(vma->vm_page_prot),
219 					rdma_entry);
220 		break;
221 	case IRDMA_MMAP_IO_WC:
222 		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
223 					pgprot_writecombine(vma->vm_page_prot),
224 					rdma_entry);
225 		break;
226 	default:
227 		ret = -EINVAL;
228 	}
229 
230 	if (ret)
231 		ibdev_dbg(&ucontext->iwdev->ibdev,
232 			  "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
233 			  entry->bar_offset, entry->mmap_flag, ret);
234 	rdma_user_mmap_entry_put(rdma_entry);
235 
236 	return ret;
237 }
238 
239 /**
240  * irdma_alloc_push_page - allocate a push page for qp
241  * @iwqp: qp pointer
242  */
243 static void irdma_alloc_push_page(struct irdma_qp *iwqp)
244 {
245 	struct irdma_cqp_request *cqp_request;
246 	struct cqp_cmds_info *cqp_info;
247 	struct irdma_device *iwdev = iwqp->iwdev;
248 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
249 	int status;
250 
251 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
252 	if (!cqp_request)
253 		return;
254 
255 	cqp_info = &cqp_request->info;
256 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
257 	cqp_info->post_sq = 1;
258 	cqp_info->in.u.manage_push_page.info.push_idx = 0;
259 	cqp_info->in.u.manage_push_page.info.qs_handle =
260 		qp->vsi->qos[qp->user_pri].qs_handle;
261 	cqp_info->in.u.manage_push_page.info.free_page = 0;
262 	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
263 	cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
264 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
265 
266 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
267 	if (!status && cqp_request->compl_info.op_ret_val <
268 	    iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
269 		qp->push_idx = cqp_request->compl_info.op_ret_val;
270 		qp->push_offset = 0;
271 	}
272 
273 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
274 }
275 
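/*
 * Background note: a push page lets the SQ post write the WQE directly
 * into device memory over a write-combined mapping instead of having the
 * HW DMA it from host memory. On success the CQP completion returns the
 * page index consumed later by irdma_setup_push_mmap_entries().
 */
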
276 /**
277  * irdma_alloc_ucontext - Allocate the user context data structure
278  * @uctx: uverbs context pointer
279  * @udata: user data
280  *
281  * This keeps track of all objects associated with a particular
282  * user-mode client.
283  */
284 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
285 				struct ib_udata *udata)
286 {
287 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
288 	struct ib_device *ibdev = uctx->device;
289 	struct irdma_device *iwdev = to_iwdev(ibdev);
290 	struct irdma_alloc_ucontext_req req = {};
291 	struct irdma_alloc_ucontext_resp uresp = {};
292 	struct irdma_ucontext *ucontext = to_ucontext(uctx);
293 	struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
294 	int ret;
295 
296 	if (udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
297 		return -EINVAL;
298 
299 	ret = ib_copy_validate_udata_in_cm(udata, req, rsvd8,
300 					   IRDMA_ALLOC_UCTX_USE_RAW_ATTR |
301 						   IRDMA_SUPPORT_WQE_FORMAT_V2);
302 	if (ret)
303 		return ret;
304 
305 	if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
306 		goto ver_error;
307 
308 	ucontext->iwdev = iwdev;
309 	ucontext->abi_ver = req.userspace_ver;
310 
311 	if (!(req.comp_mask & IRDMA_SUPPORT_WQE_FORMAT_V2) &&
312 	    uk_attrs->hw_rev >= IRDMA_GEN_3)
313 		return -EOPNOTSUPP;
314 
315 	if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
316 		ucontext->use_raw_attrs = true;
317 
318 	/* GEN_1 legacy support with libi40iw */
319 	if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
320 		if (uk_attrs->hw_rev != IRDMA_GEN_1)
321 			return -EOPNOTSUPP;
322 
323 		ucontext->legacy_mode = true;
324 		uresp.max_qps = iwdev->rf->max_qp;
325 		uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
326 		uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
327 		uresp.kernel_ver = req.userspace_ver;
328 		if (ib_copy_to_udata(udata, &uresp,
329 				     min(sizeof(uresp), udata->outlen)))
330 			return -EFAULT;
331 	} else {
332 		u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
333 
334 		ucontext->db_mmap_entry =
335 			irdma_user_mmap_entry_insert(ucontext, bar_off,
336 						     IRDMA_MMAP_IO_NC,
337 						     &uresp.db_mmap_key);
338 		if (!ucontext->db_mmap_entry)
339 			return -ENOMEM;
340 
341 		uresp.kernel_ver = IRDMA_ABI_VER;
342 		uresp.feature_flags = uk_attrs->feature_flags;
343 		uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
344 		uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
345 		uresp.max_hw_inline = uk_attrs->max_hw_inline;
346 		uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
347 		uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
348 		uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
349 		uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
350 		uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
351 		uresp.hw_rev = uk_attrs->hw_rev;
352 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
353 		uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
354 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
355 		uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta;
356 		uresp.comp_mask |= IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA;
357 		if (ib_copy_to_udata(udata, &uresp,
358 				     min(sizeof(uresp), udata->outlen))) {
359 			rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
360 			return -EFAULT;
361 		}
362 	}
363 
364 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
365 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
366 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
367 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
368 	INIT_LIST_HEAD(&ucontext->srq_reg_mem_list);
369 	spin_lock_init(&ucontext->srq_reg_mem_list_lock);
370 
371 	return 0;
372 
373 ver_error:
374 	ibdev_err(&iwdev->ibdev,
375 		  "Invalid userspace driver version detected. Detected version %d, should be %d\n",
376 		  req.userspace_ver, IRDMA_ABI_VER);
377 	return -EINVAL;
378 }
379 
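/*
 * This path is driven from user space by ibv_open_device(): the provider
 * library fills the irdma_alloc_ucontext_req and consumes the uresp,
 * typically mmap()ing uresp.db_mmap_key for doorbell access. Sketch
 * (hedged, error handling elided):
 *
 *	struct ibv_context *ctx = ibv_open_device(dev);
 */
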
380 /**
381  * irdma_dealloc_ucontext - deallocate the user context data structure
382  * @context: user context created during alloc
383  */
irdma_dealloc_ucontext(struct ib_ucontext * context)384 static void irdma_dealloc_ucontext(struct ib_ucontext *context)
385 {
386 	struct irdma_ucontext *ucontext = to_ucontext(context);
387 
388 	rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
389 }
390 
391 /**
392  * irdma_alloc_pd - allocate protection domain
393  * @pd: PD pointer
394  * @udata: user data
395  */
396 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
397 {
398 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
399 	struct irdma_pd *iwpd = to_iwpd(pd);
400 	struct irdma_device *iwdev = to_iwdev(pd->device);
401 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
402 	struct irdma_pci_f *rf = iwdev->rf;
403 	struct irdma_alloc_pd_resp uresp = {};
404 	struct irdma_sc_pd *sc_pd;
405 	u32 pd_id = 0;
406 	int err;
407 
408 	if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
409 		return -EINVAL;
410 
411 	err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
412 			       &rf->next_pd);
413 	if (err)
414 		return err;
415 
416 	sc_pd = &iwpd->sc_pd;
417 	if (udata) {
418 		struct irdma_ucontext *ucontext =
419 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
420 						  ibucontext);
421 		irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
422 		uresp.pd_id = pd_id;
423 		if (ib_copy_to_udata(udata, &uresp,
424 				     min(sizeof(uresp), udata->outlen))) {
425 			err = -EFAULT;
426 			goto error;
427 		}
428 	} else {
429 		irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
430 	}
431 
432 	return 0;
433 error:
434 	irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
435 
436 	return err;
437 }
438 
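/*
 * User-space counterpart sketch (assumes ctx from ibv_open_device):
 *
 *	struct ibv_pd *pd = ibv_alloc_pd(ctx);
 *
 * The provider library keeps uresp.pd_id so later MR/QP requests refer to
 * the same hardware PD index allocated from rf->allocated_pds above.
 */
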
439 /**
440  * irdma_dealloc_pd - deallocate pd
441  * @ibpd: ptr of pd to be deallocated
442  * @udata: user data
443  */
444 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
445 {
446 	struct irdma_pd *iwpd = to_iwpd(ibpd);
447 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
448 
449 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
450 
451 	return 0;
452 }
453 
454 /**
455  * irdma_get_pbl - Retrieve pbl from a list given a virtual
456  * address
457  * @va: user virtual address
458  * @pbl_list: pbl list to search in (QP's or CQ's)
459  */
460 static struct irdma_pbl *irdma_get_pbl(unsigned long va,
461 				       struct list_head *pbl_list)
462 {
463 	struct irdma_pbl *iwpbl;
464 
465 	list_for_each_entry(iwpbl, pbl_list, list) {
466 		if (iwpbl->user_base == va) {
467 			list_del(&iwpbl->list);
468 			iwpbl->on_list = false;
469 			return iwpbl;
470 		}
471 	}
472 
473 	return NULL;
474 }
475 
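/*
 * Note the side effect: a hit is unlinked from @pbl_list, so a given
 * user_base can be claimed exactly once; afterwards the QP/CQ create path
 * that called this owns the iwpbl.
 */
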
476 /**
477  * irdma_clean_cqes - clean cq entries for qp
478  * @iwqp: qp ptr (user or kernel)
479  * @iwcq: cq ptr
480  */
481 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
482 {
483 	struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
484 	unsigned long flags;
485 
486 	spin_lock_irqsave(&iwcq->lock, flags);
487 	irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
488 	spin_unlock_irqrestore(&iwcq->lock, flags);
489 }
490 
491 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
492 {
493 	if (iwqp->push_db_mmap_entry) {
494 		rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
495 		iwqp->push_db_mmap_entry = NULL;
496 	}
497 	if (iwqp->push_wqe_mmap_entry) {
498 		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
499 		iwqp->push_wqe_mmap_entry = NULL;
500 	}
501 }
502 
503 static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
504 					 struct irdma_qp *iwqp,
505 					 u64 *push_wqe_mmap_key,
506 					 u64 *push_db_mmap_key)
507 {
508 	struct irdma_device *iwdev = ucontext->iwdev;
509 	u64 rsvd, bar_off;
510 
511 	rsvd = IRDMA_PF_BAR_RSVD;
512 	bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
513 	/* skip over db page */
514 	bar_off += IRDMA_HW_PAGE_SIZE;
515 	/* push wqe page */
516 	bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
517 	iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
518 					bar_off, IRDMA_MMAP_IO_WC,
519 					push_wqe_mmap_key);
520 	if (!iwqp->push_wqe_mmap_entry)
521 		return -ENOMEM;
522 
523 	/* push doorbell page */
524 	bar_off += IRDMA_HW_PAGE_SIZE;
525 	iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
526 					bar_off, IRDMA_MMAP_IO_NC,
527 					push_db_mmap_key);
528 	if (!iwqp->push_db_mmap_entry) {
529 		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
530 		return -ENOMEM;
531 	}
532 
533 	return 0;
534 }
535 
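/*
 * Rough BAR layout implied by the offset math above (sketch):
 *
 *	[db page][IRDMA_PF_BAR_RSVD][push WQE page(push_idx)][push db page]
 *
 * i.e. each push doorbell mapping sits one HW page above its WQE page.
 */
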
536 /**
537  * irdma_destroy_qp - destroy qp
538  * @ibqp: qp's ib pointer also to get to device's qp address
539  * @udata: user data
540  */
541 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
542 {
543 	struct irdma_qp *iwqp = to_iwqp(ibqp);
544 	struct irdma_device *iwdev = iwqp->iwdev;
545 
546 	iwqp->sc_qp.qp_uk.destroy_pending = true;
547 
548 	if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE)
549 		irdma_modify_qp_to_err(&iwqp->sc_qp);
550 
551 	if (!iwqp->user_mode)
552 		cancel_delayed_work_sync(&iwqp->dwork_flush);
553 
554 	if (!iwqp->user_mode) {
555 		if (iwqp->iwscq) {
556 			irdma_clean_cqes(iwqp, iwqp->iwscq);
557 			if (iwqp->iwrcq != iwqp->iwscq)
558 				irdma_clean_cqes(iwqp, iwqp->iwrcq);
559 		}
560 	}
561 
562 	irdma_qp_rem_ref(&iwqp->ibqp);
563 	if (!iwdev->rf->reset)
564 		wait_for_completion(&iwqp->free_qp);
565 	irdma_free_lsmm_rsrc(iwqp);
566 	irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
567 
568 	irdma_remove_push_mmap_entries(iwqp);
569 
570 	if (iwqp->sc_qp.qp_uk.qp_id == 1)
571 		iwdev->rf->hwqp1_rsvd = false;
572 	irdma_free_qp_rsrc(iwqp);
573 
574 	return 0;
575 }
576 
577 /**
578  * irdma_setup_virt_qp - setup for allocation of virtual qp
579  * @iwdev: irdma device
580  * @iwqp: qp ptr
581  * @init_info: initialize info to return
582  */
583 static void irdma_setup_virt_qp(struct irdma_device *iwdev,
584 			       struct irdma_qp *iwqp,
585 			       struct irdma_qp_init_info *init_info)
586 {
587 	struct irdma_pbl *iwpbl = iwqp->iwpbl;
588 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
589 
590 	iwqp->page = qpmr->sq_page;
591 	init_info->shadow_area_pa = qpmr->shadow;
592 	if (iwpbl->pbl_allocated) {
593 		init_info->virtual_map = true;
594 		init_info->sq_pa = qpmr->sq_pbl.idx;
595 		/* Need to use contiguous buffer for RQ of QP
596 		 * in case it is associated with SRQ.
597 		 */
598 		init_info->rq_pa = init_info->qp_uk_init_info.srq_uk ?
599 			qpmr->rq_pa : qpmr->rq_pbl.idx;
600 	} else {
601 		init_info->sq_pa = qpmr->sq_pbl.addr;
602 		init_info->rq_pa = qpmr->rq_pbl.addr;
603 	}
604 }
605 
606 /**
607  * irdma_setup_umode_qp - setup sq and rq size in user mode qp
608  * @udata: user data
609  * @iwdev: iwarp device
610  * @iwqp: qp ptr (user or kernel)
611  * @info: initialize info to return
612  * @init_attr: Initial QP create attributes
613  */
614 static int irdma_setup_umode_qp(struct ib_udata *udata,
615 				struct irdma_device *iwdev,
616 				struct irdma_qp *iwqp,
617 				struct irdma_qp_init_info *info,
618 				struct ib_qp_init_attr *init_attr)
619 {
620 	struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
621 				struct irdma_ucontext, ibucontext);
622 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
623 	struct irdma_create_qp_req req;
624 	unsigned long flags;
625 	int ret;
626 
627 	ret = ib_copy_from_udata(&req, udata,
628 				 min(sizeof(req), udata->inlen));
629 	if (ret) {
630 		ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n");
631 		return ret;
632 	}
633 
634 	iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
635 	iwqp->user_mode = 1;
636 	if (req.user_wqe_bufs) {
637 		info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
638 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
639 		iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
640 					    &ucontext->qp_reg_mem_list);
641 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
642 
643 		if (!iwqp->iwpbl) {
644 			ret = -ENODATA;
645 			ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
646 			return ret;
647 		}
648 	}
649 
650 	if (!ucontext->use_raw_attrs) {
651 		/**
652 		 * Maintain backward compat with older ABI which passes sq and
653 		 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
654 		 * There is no way to compute the correct value of
655 		 * iwqp->max_send_wr/max_recv_wr in the kernel.
656 		 */
657 		iwqp->max_send_wr = init_attr->cap.max_send_wr;
658 		iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
659 		ukinfo->sq_size = init_attr->cap.max_send_wr;
660 		ukinfo->rq_size = init_attr->cap.max_recv_wr;
661 		irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
662 				       &ukinfo->rq_shift);
663 	} else {
664 		ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
665 						   &ukinfo->sq_shift);
666 		if (ret)
667 			return ret;
668 
669 		ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
670 						   &ukinfo->rq_shift);
671 		if (ret)
672 			return ret;
673 
674 		iwqp->max_send_wr =
675 			(ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
676 		iwqp->max_recv_wr =
677 			(ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
678 		ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
679 		ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
680 	}
681 
682 	irdma_setup_virt_qp(iwdev, iwqp, info);
683 
684 	return 0;
685 }
686 
687 /**
688  * irdma_setup_kmode_qp - setup initialization for kernel mode qp
689  * @iwdev: iwarp device
690  * @iwqp: qp ptr (user or kernel)
691  * @info: initialize info to return
692  * @init_attr: Initial QP create attributes
693  */
694 static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
695 				struct irdma_qp *iwqp,
696 				struct irdma_qp_init_info *info,
697 				struct ib_qp_init_attr *init_attr)
698 {
699 	struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
700 	u32 size;
701 	int status;
702 	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
703 
704 	status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
705 					      &ukinfo->sq_shift);
706 	if (status)
707 		return status;
708 
709 	status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
710 					      &ukinfo->rq_shift);
711 	if (status)
712 		return status;
713 
714 	iwqp->kqp.sq_wrid_mem =
715 		kzalloc_objs(*iwqp->kqp.sq_wrid_mem, ukinfo->sq_depth);
716 	if (!iwqp->kqp.sq_wrid_mem)
717 		return -ENOMEM;
718 
719 	iwqp->kqp.rq_wrid_mem =
720 		kzalloc_objs(*iwqp->kqp.rq_wrid_mem, ukinfo->rq_depth);
721 
722 	if (!iwqp->kqp.rq_wrid_mem) {
723 		kfree(iwqp->kqp.sq_wrid_mem);
724 		iwqp->kqp.sq_wrid_mem = NULL;
725 		return -ENOMEM;
726 	}
727 
728 	ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
729 	ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
730 
731 	size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
732 	size += (IRDMA_SHADOW_AREA_SIZE << 3);
733 
734 	mem->size = ALIGN(size, 256);
735 	mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
736 				     &mem->pa, GFP_KERNEL);
737 	if (!mem->va) {
738 		kfree(iwqp->kqp.sq_wrid_mem);
739 		iwqp->kqp.sq_wrid_mem = NULL;
740 		kfree(iwqp->kqp.rq_wrid_mem);
741 		iwqp->kqp.rq_wrid_mem = NULL;
742 		return -ENOMEM;
743 	}
744 
745 	ukinfo->sq = mem->va;
746 	info->sq_pa = mem->pa;
747 	ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
748 	info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
749 	ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
750 	info->shadow_area_pa =
751 		info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
752 	ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
753 	ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
754 	ukinfo->qp_id = info->qp_uk_init_info.qp_id;
755 
756 	iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
757 	iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
758 	init_attr->cap.max_send_wr = iwqp->max_send_wr;
759 	init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
760 
761 	return 0;
762 }
763 
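/*
 * The single coherent allocation above is carved up as (sketch):
 *
 *	mem->va: [SQ: sq_depth * IRDMA_QP_WQE_MIN_SIZE]
 *	         [RQ: rq_depth * IRDMA_QP_WQE_MIN_SIZE]
 *	         [shadow area]
 *
 * which is exactly the pointer math applied to ukinfo->sq/rq and
 * info->shadow_area_pa.
 */
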
764 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
765 {
766 	struct irdma_pci_f *rf = iwqp->iwdev->rf;
767 	struct irdma_cqp_request *cqp_request;
768 	struct cqp_cmds_info *cqp_info;
769 	struct irdma_create_qp_info *qp_info;
770 	int status;
771 
772 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
773 	if (!cqp_request)
774 		return -ENOMEM;
775 
776 	cqp_info = &cqp_request->info;
777 	qp_info = &cqp_request->info.in.u.qp_create.info;
778 	qp_info->mac_valid = true;
779 	qp_info->cq_num_valid = true;
780 	qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
781 
782 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
783 	cqp_info->post_sq = 1;
784 	cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
785 	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
786 	status = irdma_handle_cqp_op(rf, cqp_request);
787 	irdma_put_cqp_request(&rf->cqp, cqp_request);
788 
789 	return status;
790 }
791 
792 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
793 					       struct irdma_qp_host_ctx_info *ctx_info)
794 {
795 	struct irdma_device *iwdev = iwqp->iwdev;
796 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
797 	struct irdma_roce_offload_info *roce_info;
798 	struct irdma_udp_offload_info *udp_info;
799 
800 	udp_info = &iwqp->udp_info;
801 	udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
802 	udp_info->cwnd = iwdev->roce_cwnd;
803 	udp_info->rexmit_thresh = 2;
804 	udp_info->rnr_nak_thresh = 2;
805 	udp_info->src_port = 0xc000;
806 	udp_info->dst_port = ROCE_V2_UDP_DPORT;
807 	roce_info = &iwqp->roce_info;
808 	ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
809 
810 	if (iwqp->ibqp.qp_type == IB_QPT_GSI && iwqp->ibqp.qp_num != 1)
811 		roce_info->is_qp1 = true;
812 	roce_info->rd_en = true;
813 	roce_info->wr_rdresp_en = true;
814 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
815 		roce_info->bind_en = true;
816 	roce_info->dcqcn_en = false;
817 	roce_info->rtomin = 5;
818 
819 	roce_info->ack_credits = iwdev->roce_ackcreds;
820 	roce_info->ird_size = dev->hw_attrs.max_hw_ird;
821 	roce_info->ord_size = dev->hw_attrs.max_hw_ord;
822 
823 	if (!iwqp->user_mode) {
824 		roce_info->priv_mode_en = true;
825 		roce_info->fast_reg_en = true;
826 		roce_info->udprivcq_en = true;
827 	}
828 	roce_info->roce_tver = 0;
829 
830 	ctx_info->roce_info = &iwqp->roce_info;
831 	ctx_info->udp_info = &iwqp->udp_info;
832 	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
833 }
834 
835 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
836 					     struct irdma_qp_host_ctx_info *ctx_info)
837 {
838 	struct irdma_device *iwdev = iwqp->iwdev;
839 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
840 	struct irdma_iwarp_offload_info *iwarp_info;
841 
842 	iwarp_info = &iwqp->iwarp_info;
843 	ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
844 	iwarp_info->rd_en = true;
845 	iwarp_info->wr_rdresp_en = true;
846 	iwarp_info->ecn_en = true;
847 	iwarp_info->rtomin = 5;
848 
849 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
850 		iwarp_info->ib_rd_en = true;
851 	if (!iwqp->user_mode) {
852 		iwarp_info->priv_mode_en = true;
853 		iwarp_info->fast_reg_en = true;
854 	}
855 	iwarp_info->ddp_ver = 1;
856 	iwarp_info->rdmap_ver = 1;
857 
858 	ctx_info->iwarp_info = &iwqp->iwarp_info;
859 	ctx_info->iwarp_info_valid = true;
860 	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
861 	ctx_info->iwarp_info_valid = false;
862 }
863 
864 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
865 				   struct irdma_device *iwdev)
866 {
867 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
868 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
869 
870 	if (init_attr->create_flags)
871 		return -EOPNOTSUPP;
872 
873 	if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
874 	    init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
875 	    init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
876 	    init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
877 	    init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
878 		return -EINVAL;
879 
880 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
881 		if (init_attr->qp_type != IB_QPT_RC &&
882 		    init_attr->qp_type != IB_QPT_UD &&
883 		    init_attr->qp_type != IB_QPT_GSI)
884 			return -EOPNOTSUPP;
885 	} else {
886 		if (init_attr->qp_type != IB_QPT_RC)
887 			return -EOPNOTSUPP;
888 	}
889 
890 	return 0;
891 }
892 
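/*
 * Example of an init_attr this check accepts on a RoCE device (values
 * illustrative, within the uk_attrs limits):
 *
 *	struct ib_qp_init_attr attr = {
 *		.qp_type = IB_QPT_RC,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 2, .max_recv_sge = 2 },
 *	};
 */
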
893 static void irdma_flush_worker(struct work_struct *work)
894 {
895 	struct delayed_work *dwork = to_delayed_work(work);
896 	struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
897 
898 	irdma_generate_flush_completions(iwqp);
899 }
900 
901 static int irdma_setup_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 *qp_num)
902 {
903 	struct irdma_device *iwdev = iwqp->iwdev;
904 	struct irdma_pci_f *rf = iwdev->rf;
905 	unsigned long flags;
906 	int ret;
907 
908 	if (rf->rdma_ver <= IRDMA_GEN_2) {
909 		*qp_num = 1;
910 		return 0;
911 	}
912 
913 	spin_lock_irqsave(&rf->rsrc_lock, flags);
914 	if (!rf->hwqp1_rsvd) {
915 		*qp_num = 1;
916 		rf->hwqp1_rsvd = true;
917 		spin_unlock_irqrestore(&rf->rsrc_lock, flags);
918 	} else {
919 		spin_unlock_irqrestore(&rf->rsrc_lock, flags);
920 		ret = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
921 				       qp_num, &rf->next_qp);
922 		if (ret)
923 			return ret;
924 	}
925 
926 	ret = irdma_vchnl_req_add_vport(&rf->sc_dev, iwdev->vport_id, *qp_num,
927 					iwdev->vsi.qos);
928 	if (ret) {
929 		if (*qp_num != 1) {
930 			irdma_free_rsrc(rf, rf->allocated_qps, *qp_num);
931 		} else {
932 			spin_lock_irqsave(&rf->rsrc_lock, flags);
933 			rf->hwqp1_rsvd = false;
934 			spin_unlock_irqrestore(&rf->rsrc_lock, flags);
935 		}
936 		return ret;
937 	}
938 
939 	return 0;
940 }
941 
942 /**
943  * irdma_create_qp - create qp
944  * @ibqp: ptr of qp
945  * @init_attr: attributes for qp
946  * @udata: user data for create qp
947  */
948 static int irdma_create_qp(struct ib_qp *ibqp,
949 			   struct ib_qp_init_attr *init_attr,
950 			   struct ib_udata *udata)
951 {
952 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
953 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
954 	struct ib_pd *ibpd = ibqp->pd;
955 	struct irdma_pd *iwpd = to_iwpd(ibpd);
956 	struct irdma_device *iwdev = to_iwdev(ibpd->device);
957 	struct irdma_pci_f *rf = iwdev->rf;
958 	struct irdma_qp *iwqp = to_iwqp(ibqp);
959 	struct irdma_create_qp_resp uresp = {};
960 	u32 qp_num = 0;
961 	int err_code;
962 	struct irdma_sc_qp *qp;
963 	struct irdma_sc_dev *dev = &rf->sc_dev;
964 	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
965 	struct irdma_qp_init_info init_info = {};
966 	struct irdma_qp_host_ctx_info *ctx_info;
967 	struct irdma_srq *iwsrq;
968 	bool srq_valid = false;
969 	u32 srq_id = 0;
970 
971 	if (init_attr->srq) {
972 		iwsrq = to_iwsrq(init_attr->srq);
973 		srq_valid = true;
974 		srq_id = iwsrq->srq_num;
975 		init_attr->cap.max_recv_sge = uk_attrs->max_hw_wq_frags;
976 		init_attr->cap.max_recv_wr = 4;
977 		init_info.qp_uk_init_info.srq_uk = &iwsrq->sc_srq.srq_uk;
978 	}
979 
980 	err_code = irdma_validate_qp_attrs(init_attr, iwdev);
981 	if (err_code)
982 		return err_code;
983 
984 	if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
985 		      udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
986 		return -EINVAL;
987 
988 	init_info.vsi = &iwdev->vsi;
989 	init_info.qp_uk_init_info.uk_attrs = uk_attrs;
990 	init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
991 	init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
992 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
993 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
994 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
995 
996 	qp = &iwqp->sc_qp;
997 	qp->qp_uk.back_qp = iwqp;
998 	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
999 
1000 	iwqp->iwdev = iwdev;
1001 	iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
1002 				      256);
1003 	iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
1004 						 iwqp->q2_ctx_mem.size,
1005 						 &iwqp->q2_ctx_mem.pa,
1006 						 GFP_KERNEL);
1007 	if (!iwqp->q2_ctx_mem.va)
1008 		return -ENOMEM;
1009 
1010 	init_info.q2 = iwqp->q2_ctx_mem.va;
1011 	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
1012 	init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
1013 	init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
1014 
1015 	if (init_attr->qp_type == IB_QPT_GSI) {
1016 		err_code = irdma_setup_gsi_qp_rsrc(iwqp, &qp_num);
1017 		if (err_code)
1018 			goto error;
1019 		iwqp->ibqp.qp_num = 1;
1020 	} else {
1021 		err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
1022 					    &qp_num, &rf->next_qp);
1023 		if (err_code)
1024 			goto error;
1025 		iwqp->ibqp.qp_num = qp_num;
1026 	}
1027 
1028 	iwqp->iwpd = iwpd;
1029 	qp = &iwqp->sc_qp;
1030 	iwqp->iwscq = to_iwcq(init_attr->send_cq);
1031 	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
1032 	iwqp->host_ctx.va = init_info.host_ctx;
1033 	iwqp->host_ctx.pa = init_info.host_ctx_pa;
1034 	iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
1035 
1036 	init_info.pd = &iwpd->sc_pd;
1037 	init_info.qp_uk_init_info.qp_id = qp_num;
1038 	if (!rdma_protocol_roce(&iwdev->ibdev, 1))
1039 		init_info.qp_uk_init_info.first_sq_wq = 1;
1040 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
1041 	init_waitqueue_head(&iwqp->waitq);
1042 	init_waitqueue_head(&iwqp->mod_qp_waitq);
1043 
1044 	if (udata) {
1045 		init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
1046 		err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
1047 						init_attr);
1048 	} else {
1049 		INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
1050 		init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
1051 		err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
1052 	}
1053 
1054 	if (err_code) {
1055 		ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
1056 		goto error;
1057 	}
1058 
1059 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1060 		if (init_attr->qp_type == IB_QPT_RC) {
1061 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
1062 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
1063 							    IRDMA_WRITE_WITH_IMM |
1064 							    IRDMA_ROCE;
1065 		} else {
1066 			init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
1067 			init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
1068 							    IRDMA_ROCE;
1069 		}
1070 	} else {
1071 		init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
1072 		init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
1073 	}
1074 
1075 	if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
1076 		init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
1077 
1078 	err_code = irdma_sc_qp_init(qp, &init_info);
1079 	if (err_code) {
1080 		ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
1081 		goto error;
1082 	}
1083 
1084 	ctx_info = &iwqp->ctx_info;
1085 	ctx_info->srq_valid = srq_valid;
1086 	ctx_info->srq_id = srq_id;
1087 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1088 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1089 
1090 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1091 		if (dev->ws_add(&iwdev->vsi, 0)) {
1092 			irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
1093 			err_code = -EINVAL;
1094 			goto error;
1095 		}
1096 		irdma_qp_add_qos(&iwqp->sc_qp);
1097 		irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
1098 	} else {
1099 		irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
1100 	}
1101 
1102 	err_code = irdma_cqp_create_qp_cmd(iwqp);
1103 	if (err_code)
1104 		goto error;
1105 
1106 	refcount_set(&iwqp->refcnt, 1);
1107 	spin_lock_init(&iwqp->lock);
1108 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
1109 	iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1110 	rf->qp_table[qp_num] = iwqp;
1111 	init_completion(&iwqp->free_qp);
1112 
1113 	if (udata) {
1114 		/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
1115 		if (udata->outlen < sizeof(uresp)) {
1116 			uresp.lsmm = 1;
1117 			uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
1118 		} else {
1119 			if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
1120 				uresp.lsmm = 1;
1121 		}
1122 		uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
1123 		uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
1124 		uresp.qp_id = qp_num;
1125 		uresp.qp_caps = qp->qp_uk.qp_caps;
1126 
1127 		err_code = ib_copy_to_udata(udata, &uresp,
1128 					    min(sizeof(uresp), udata->outlen));
1129 		if (err_code) {
1130 			ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
1131 			irdma_destroy_qp(&iwqp->ibqp, udata);
1132 			return err_code;
1133 		}
1134 	}
1135 
1136 	return 0;
1137 
1138 error:
1139 	irdma_free_qp_rsrc(iwqp);
1140 	return err_code;
1141 }
1142 
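/*
 * User-space counterpart sketch (assumes pd and cq already exist; sizes
 * illustrative):
 *
 *	struct ibv_qp_init_attr ia = {
 *		.send_cq = cq, .recv_cq = cq, .qp_type = IBV_QPT_RC,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 2, .max_recv_sge = 2 },
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &ia);
 */
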
1143 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1144 {
1145 	int acc_flags = 0;
1146 
1147 	if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1148 		if (iwqp->roce_info.wr_rdresp_en) {
1149 			acc_flags |= IB_ACCESS_LOCAL_WRITE;
1150 			acc_flags |= IB_ACCESS_REMOTE_WRITE;
1151 		}
1152 		if (iwqp->roce_info.rd_en)
1153 			acc_flags |= IB_ACCESS_REMOTE_READ;
1154 		if (iwqp->roce_info.bind_en)
1155 			acc_flags |= IB_ACCESS_MW_BIND;
1156 		if (iwqp->ctx_info.remote_atomics_en)
1157 			acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1158 	} else {
1159 		if (iwqp->iwarp_info.wr_rdresp_en) {
1160 			acc_flags |= IB_ACCESS_LOCAL_WRITE;
1161 			acc_flags |= IB_ACCESS_REMOTE_WRITE;
1162 		}
1163 		if (iwqp->iwarp_info.rd_en)
1164 			acc_flags |= IB_ACCESS_REMOTE_READ;
1165 		if (iwqp->ctx_info.remote_atomics_en)
1166 			acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1167 	}
1168 	return acc_flags;
1169 }
1170 
1171 /**
1172  * irdma_query_qp - query qp attributes
1173  * @ibqp: qp pointer
1174  * @attr: attributes pointer
1175  * @attr_mask: Not used
1176  * @init_attr: qp attributes to return
1177  */
1178 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1179 			  int attr_mask, struct ib_qp_init_attr *init_attr)
1180 {
1181 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1182 	struct irdma_sc_qp *qp = &iwqp->sc_qp;
1183 
1184 	memset(attr, 0, sizeof(*attr));
1185 	memset(init_attr, 0, sizeof(*init_attr));
1186 
1187 	attr->qp_state = iwqp->ibqp_state;
1188 	attr->cur_qp_state = iwqp->ibqp_state;
1189 	attr->cap.max_send_wr = iwqp->max_send_wr;
1190 	attr->cap.max_recv_wr = iwqp->max_recv_wr;
1191 	attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
1192 	attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
1193 	attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
1194 	attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1195 	attr->port_num = 1;
1196 	if (rdma_protocol_roce(ibqp->device, 1)) {
1197 		attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1198 		attr->qkey = iwqp->roce_info.qkey;
1199 		attr->rq_psn = iwqp->udp_info.epsn;
1200 		attr->sq_psn = iwqp->udp_info.psn_nxt;
1201 		attr->dest_qp_num = iwqp->roce_info.dest_qp;
1202 		attr->pkey_index = iwqp->roce_info.p_key;
1203 		attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1204 		attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1205 		attr->min_rnr_timer = iwqp->udp_info.min_rnr_timer;
1206 		attr->max_rd_atomic = iwqp->roce_info.ord_size;
1207 		attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1208 	}
1209 
1210 	init_attr->event_handler = iwqp->ibqp.event_handler;
1211 	init_attr->qp_context = iwqp->ibqp.qp_context;
1212 	init_attr->send_cq = iwqp->ibqp.send_cq;
1213 	init_attr->recv_cq = iwqp->ibqp.recv_cq;
1214 	init_attr->srq = iwqp->ibqp.srq;
1215 	init_attr->cap = attr->cap;
1216 
1217 	return 0;
1218 }
1219 
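/*
 * User-space sketch (hedged):
 *
 *	struct ibv_qp_attr qa;
 *	struct ibv_qp_init_attr ia;
 *
 *	if (!ibv_query_qp(qp, &qa, IBV_QP_STATE, &ia))
 *		printf("state=%d\n", qa.qp_state);
 */
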
1220 /**
1221  * irdma_query_pkey - Query partition key
1222  * @ibdev: device pointer from stack
1223  * @port: port number
1224  * @index: index of pkey
1225  * @pkey: pointer to store the pkey
1226  */
irdma_query_pkey(struct ib_device * ibdev,u32 port,u16 index,u16 * pkey)1227 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1228 			    u16 *pkey)
1229 {
1230 	if (index >= IRDMA_PKEY_TBL_SZ)
1231 		return -EINVAL;
1232 
1233 	*pkey = IRDMA_DEFAULT_PKEY;
1234 	return 0;
1235 }
1236 
1237 static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
1238 {
1239 	struct net_device *ndev;
1240 
1241 	rcu_read_lock();
1242 	ndev = rcu_dereference(attr->ndev);
1243 	if (!ndev)
1244 		goto exit;
1245 	if (is_vlan_dev(ndev)) {
1246 		u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);
1247 
1248 		prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1249 	}
1250 exit:
1251 	rcu_read_unlock();
1252 	return prio;
1253 }
1254 
1255 static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
1256 {
1257 	if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
1258 				!iwqp->suspend_pending,
1259 				msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
1260 		iwqp->suspend_pending = false;
1261 		ibdev_warn(&iwqp->iwdev->ibdev,
1262 			   "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
1263 			   iwqp->ibqp.qp_num, iwqp->last_aeq);
1264 		return -EBUSY;
1265 	}
1266 
1267 	return 0;
1268 }
1269 
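/*
 * Used by the SQD transition in irdma_modify_qp_roce() below: the QP
 * suspend completes asynchronously, so the modify path parks here until
 * the suspend-complete AE clears suspend_pending or the wait times out.
 */
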
1270 /**
1271  * irdma_modify_qp_roce - modify qp request
1272  * @ibqp: qp's pointer for modify
1273  * @attr: access attributes
1274  * @attr_mask: state mask
1275  * @udata: user data
1276  */
1277 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1278 			 int attr_mask, struct ib_udata *udata)
1279 {
1280 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1281 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1282 	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
1283 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1284 	struct irdma_device *iwdev = iwqp->iwdev;
1285 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1286 	struct irdma_qp_host_ctx_info *ctx_info;
1287 	struct irdma_roce_offload_info *roce_info;
1288 	struct irdma_udp_offload_info *udp_info;
1289 	struct irdma_modify_qp_info info = {};
1290 	struct irdma_modify_qp_resp uresp = {};
1291 	struct irdma_modify_qp_req ureq = {};
1292 	unsigned long flags;
1293 	u8 issue_modify_qp = 0;
1294 	int ret = 0;
1295 
1296 	ctx_info = &iwqp->ctx_info;
1297 	roce_info = &iwqp->roce_info;
1298 	udp_info = &iwqp->udp_info;
1299 
1300 	if (udata) {
1301 		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1302 		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1303 		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1304 			return -EINVAL;
1305 	}
1306 
1307 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1308 		return -EOPNOTSUPP;
1309 
1310 	if (attr_mask & IB_QP_DEST_QPN)
1311 		roce_info->dest_qp = attr->dest_qp_num;
1312 
1313 	if (attr_mask & IB_QP_PKEY_INDEX) {
1314 		ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
1315 				       &roce_info->p_key);
1316 		if (ret)
1317 			return ret;
1318 	}
1319 
1320 	if (attr_mask & IB_QP_QKEY)
1321 		roce_info->qkey = attr->qkey;
1322 
1323 	if (attr_mask & IB_QP_PATH_MTU)
1324 		udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
1325 
1326 	if (attr_mask & IB_QP_SQ_PSN) {
1327 		udp_info->psn_nxt = attr->sq_psn;
1328 		udp_info->lsn = 0xffff;
1329 		udp_info->psn_una = attr->sq_psn;
1330 		udp_info->psn_max = attr->sq_psn;
1331 	}
1332 
1333 	if (attr_mask & IB_QP_RQ_PSN)
1334 		udp_info->epsn = attr->rq_psn;
1335 
1336 	if (attr_mask & IB_QP_RNR_RETRY)
1337 		udp_info->rnr_nak_thresh = attr->rnr_retry;
1338 
1339 	if (attr_mask & IB_QP_MIN_RNR_TIMER &&
1340 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
1341 		udp_info->min_rnr_timer = attr->min_rnr_timer;
1342 
1343 	if (attr_mask & IB_QP_RETRY_CNT)
1344 		udp_info->rexmit_thresh = attr->retry_cnt;
1345 
1346 	ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
1347 
1348 	if (attr_mask & IB_QP_AV) {
1349 		struct irdma_av *av = &iwqp->roce_ah.av;
1350 		const struct ib_gid_attr *sgid_attr =
1351 				attr->ah_attr.grh.sgid_attr;
1352 		u16 vlan_id = VLAN_N_VID;
1353 		u32 local_ip[4];
1354 
1355 		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1356 		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1357 			udp_info->ttl = attr->ah_attr.grh.hop_limit;
1358 			udp_info->flow_label = attr->ah_attr.grh.flow_label;
1359 			udp_info->tos = attr->ah_attr.grh.traffic_class;
1360 			udp_info->src_port =
1361 				rdma_get_udp_sport(udp_info->flow_label,
1362 						   ibqp->qp_num,
1363 						   roce_info->dest_qp);
1364 			irdma_qp_rem_qos(&iwqp->sc_qp);
1365 			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1366 			if (iwqp->sc_qp.vsi->dscp_mode)
1367 				ctx_info->user_pri =
1368 				iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
1369 			else
1370 				ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1371 		}
1372 		ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
1373 					      ctx_info->roce_info->mac_addr);
1374 		if (ret)
1375 			return ret;
1376 		ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
1377 							      ctx_info->user_pri);
1378 		if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1379 			return -ENOMEM;
1380 		iwqp->sc_qp.user_pri = ctx_info->user_pri;
1381 		irdma_qp_add_qos(&iwqp->sc_qp);
1382 
1383 		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1384 			vlan_id = 0;
1385 		if (vlan_id < VLAN_N_VID) {
1386 			udp_info->insert_vlan_tag = true;
1387 			udp_info->vlan_tag = vlan_id |
1388 				ctx_info->user_pri << VLAN_PRIO_SHIFT;
1389 		} else {
1390 			udp_info->insert_vlan_tag = false;
1391 		}
1392 
1393 		av->attrs = attr->ah_attr;
1394 		rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
1395 		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1396 		av->net_type = rdma_gid_attr_network_type(sgid_attr);
1397 		if (av->net_type == RDMA_NETWORK_IPV6) {
1398 			__be32 *daddr =
1399 				av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1400 			__be32 *saddr =
1401 				av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1402 
1403 			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1404 			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1405 
1406 			udp_info->ipv4 = false;
1407 			irdma_copy_ip_ntohl(local_ip, daddr);
1408 
1409 		} else if (av->net_type == RDMA_NETWORK_IPV4) {
1410 			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1411 			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1412 
1413 			local_ip[0] = ntohl(daddr);
1414 
1415 			udp_info->ipv4 = true;
1416 			udp_info->dest_ip_addr[0] = 0;
1417 			udp_info->dest_ip_addr[1] = 0;
1418 			udp_info->dest_ip_addr[2] = 0;
1419 			udp_info->dest_ip_addr[3] = local_ip[0];
1420 
1421 			udp_info->local_ipaddr[0] = 0;
1422 			udp_info->local_ipaddr[1] = 0;
1423 			udp_info->local_ipaddr[2] = 0;
1424 			udp_info->local_ipaddr[3] = ntohl(saddr);
1425 		}
1426 		udp_info->arp_idx =
1427 			irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1428 				      attr->ah_attr.roce.dmac);
1429 	}
1430 
1431 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1432 		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1433 			ibdev_err(&iwdev->ibdev,
1434 				  "rd_atomic = %d, above max_hw_ord=%d\n",
1435 				  attr->max_rd_atomic,
1436 				  dev->hw_attrs.max_hw_ord);
1437 			return -EINVAL;
1438 		}
1439 		if (attr->max_rd_atomic)
1440 			roce_info->ord_size = attr->max_rd_atomic;
1441 		info.ord_valid = true;
1442 	}
1443 
1444 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1445 		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1446 			ibdev_err(&iwdev->ibdev,
1447 				  "rd_atomic = %d, above max_hw_ird=%d\n",
1448 				   attr->max_dest_rd_atomic,
1449 				   dev->hw_attrs.max_hw_ird);
1450 			return -EINVAL;
1451 		}
1452 		if (attr->max_dest_rd_atomic)
1453 			roce_info->ird_size = attr->max_dest_rd_atomic;
1454 	}
1455 
1456 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1457 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1458 			roce_info->wr_rdresp_en = true;
1459 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1460 			roce_info->wr_rdresp_en = true;
1461 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1462 			roce_info->rd_en = true;
1463 		if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
1464 			if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
1465 				ctx_info->remote_atomics_en = true;
1466 	}
1467 
1468 	ibdev_dbg(&iwdev->ibdev,
1469 		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1470 		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1471 		  iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1472 
1473 	spin_lock_irqsave(&iwqp->lock, flags);
1474 	if (attr_mask & IB_QP_STATE) {
1475 		if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1476 					iwqp->ibqp.qp_type, attr_mask)) {
1477 			ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1478 				   iwqp->ibqp.qp_num, iwqp->ibqp_state,
1479 				   attr->qp_state);
1480 			ret = -EINVAL;
1481 			goto exit;
1482 		}
1483 		info.curr_iwarp_state = iwqp->iwarp_state;
1484 
1485 		switch (attr->qp_state) {
1486 		case IB_QPS_INIT:
1487 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1488 				ret = -EINVAL;
1489 				goto exit;
1490 			}
1491 
1492 			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1493 				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1494 				issue_modify_qp = 1;
1495 			}
1496 			break;
1497 		case IB_QPS_RTR:
1498 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1499 				ret = -EINVAL;
1500 				goto exit;
1501 			}
1502 			info.arp_cache_idx_valid = true;
1503 			info.cq_num_valid = true;
1504 			info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1505 			issue_modify_qp = 1;
1506 			break;
1507 		case IB_QPS_RTS:
1508 			if (iwqp->ibqp_state < IB_QPS_RTR ||
1509 			    iwqp->ibqp_state == IB_QPS_ERR) {
1510 				ret = -EINVAL;
1511 				goto exit;
1512 			}
1513 
1514 			info.arp_cache_idx_valid = true;
1515 			info.cq_num_valid = true;
1516 			info.ord_valid = true;
1517 			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1518 			issue_modify_qp = 1;
1519 			if (iwdev->push_mode && udata &&
1520 			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1521 			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1522 				spin_unlock_irqrestore(&iwqp->lock, flags);
1523 				irdma_alloc_push_page(iwqp);
1524 				spin_lock_irqsave(&iwqp->lock, flags);
1525 			}
1526 			break;
1527 		case IB_QPS_SQD:
1528 			if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1529 				goto exit;
1530 
1531 			if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1532 				ret = -EINVAL;
1533 				goto exit;
1534 			}
1535 
1536 			info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1537 			issue_modify_qp = 1;
1538 			iwqp->suspend_pending = true;
1539 			break;
1540 		case IB_QPS_SQE:
1541 		case IB_QPS_ERR:
1542 		case IB_QPS_RESET:
1543 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1544 				iwqp->ibqp_state = attr->qp_state;
1545 				spin_unlock_irqrestore(&iwqp->lock, flags);
1546 				if (udata && udata->inlen) {
1547 					if (ib_copy_from_udata(&ureq, udata,
1548 					    min(sizeof(ureq), udata->inlen)))
1549 						return -EINVAL;
1550 
1551 					irdma_flush_wqes(iwqp,
1552 					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1553 					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1554 					    IRDMA_REFLUSH);
1555 				}
1556 				return 0;
1557 			}
1558 
1559 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1560 			issue_modify_qp = 1;
1561 			break;
1562 		default:
1563 			ret = -EINVAL;
1564 			goto exit;
1565 		}
1566 
1567 		iwqp->ibqp_state = attr->qp_state;
1568 	}
1569 
1570 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1571 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1572 	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1573 	spin_unlock_irqrestore(&iwqp->lock, flags);
1574 
1575 	if (attr_mask & IB_QP_STATE) {
1576 		if (issue_modify_qp) {
1577 			ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1578 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1579 				return -EINVAL;
1580 			if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
1581 				ret = irdma_wait_for_suspend(iwqp);
1582 				if (ret)
1583 					return ret;
1584 			}
1585 			spin_lock_irqsave(&iwqp->lock, flags);
1586 			if (iwqp->iwarp_state == info.curr_iwarp_state) {
1587 				iwqp->iwarp_state = info.next_iwarp_state;
1588 				iwqp->ibqp_state = attr->qp_state;
1589 			}
1590 			if (iwqp->ibqp_state > IB_QPS_RTS &&
1591 			    !iwqp->flush_issued) {
1592 				spin_unlock_irqrestore(&iwqp->lock, flags);
1593 				irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1594 						       IRDMA_FLUSH_RQ |
1595 						       IRDMA_FLUSH_WAIT);
1596 				iwqp->flush_issued = 1;
1597 			} else {
1598 				spin_unlock_irqrestore(&iwqp->lock, flags);
1599 			}
1600 		} else {
1601 			iwqp->ibqp_state = attr->qp_state;
1602 		}
1603 		if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1604 			struct irdma_ucontext *ucontext;
1605 
1606 			ucontext = rdma_udata_to_drv_context(udata,
1607 					struct irdma_ucontext, ibucontext);
1608 			if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1609 			    !iwqp->push_wqe_mmap_entry &&
1610 			    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1611 				&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1612 				uresp.push_valid = 1;
1613 				uresp.push_offset = iwqp->sc_qp.push_offset;
1614 			}
1615 			ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1616 					       udata->outlen));
1617 			if (ret) {
1618 				irdma_remove_push_mmap_entries(iwqp);
1619 				ibdev_dbg(&iwdev->ibdev,
1620 					  "VERBS: copy_to_udata failed\n");
1621 				return ret;
1622 			}
1623 		}
1624 	}
1625 
1626 	return 0;
1627 exit:
1628 	spin_unlock_irqrestore(&iwqp->lock, flags);
1629 
1630 	return ret;
1631 }
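
/*
 * Editor's example -- an illustrative sketch, not part of the driver: a
 * kernel ULP drives the RoCE transitions handled above through the core
 * verbs API. Moving a new QP toward RTS looks roughly like this (address
 * vector setup and error handling elided; a real RC QP also needs
 * IB_QP_AV, IB_QP_DEST_QPN and friends in the mask):
 *
 *	struct ib_qp_attr attr = {};
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
 *	attr.qp_state = IB_QPS_RTR;
 *	err = ib_modify_qp(qp, &attr, IB_QP_STATE);
 *	attr.qp_state = IB_QPS_RTS;
 *	err = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */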
1632 
1633 /**
1634  * irdma_modify_qp - modify qp request
1635  * @ibqp: qp's pointer for modify
1636  * @attr: qp attributes to modify
1637  * @attr_mask: mask of attributes to modify
1638  * @udata: user data
1639  */
1640 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1641 		    struct ib_udata *udata)
1642 {
1643 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1644 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1645 	struct irdma_qp *iwqp = to_iwqp(ibqp);
1646 	struct irdma_device *iwdev = iwqp->iwdev;
1647 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1648 	struct irdma_qp_host_ctx_info *ctx_info;
1649 	struct irdma_tcp_offload_info *tcp_info;
1650 	struct irdma_iwarp_offload_info *offload_info;
1651 	struct irdma_modify_qp_info info = {};
1652 	struct irdma_modify_qp_resp uresp = {};
1653 	struct irdma_modify_qp_req ureq = {};
1654 	u8 issue_modify_qp = 0;
1655 	u8 dont_wait = 0;
1656 	int err;
1657 	unsigned long flags;
1658 
1659 	if (udata) {
1660 		/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1661 		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1662 		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1663 			return -EINVAL;
1664 	}
1665 
1666 	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1667 		return -EOPNOTSUPP;
1668 
1669 	ctx_info = &iwqp->ctx_info;
1670 	offload_info = &iwqp->iwarp_info;
1671 	tcp_info = &iwqp->tcp_info;
1672 	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1673 	ibdev_dbg(&iwdev->ibdev,
1674 		  "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1675 		  __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1676 		  iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1677 		  iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1678 
1679 	spin_lock_irqsave(&iwqp->lock, flags);
1680 	if (attr_mask & IB_QP_STATE) {
1681 		info.curr_iwarp_state = iwqp->iwarp_state;
1682 		switch (attr->qp_state) {
1683 		case IB_QPS_INIT:
1684 		case IB_QPS_RTR:
1685 			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1686 				err = -EINVAL;
1687 				goto exit;
1688 			}
1689 
1690 			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1691 				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1692 				issue_modify_qp = 1;
1693 			}
1694 			if (iwdev->push_mode && udata &&
1695 			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1696 			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1697 				spin_unlock_irqrestore(&iwqp->lock, flags);
1698 				irdma_alloc_push_page(iwqp);
1699 				spin_lock_irqsave(&iwqp->lock, flags);
1700 			}
1701 			break;
1702 		case IB_QPS_RTS:
1703 			if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1704 			    !iwqp->cm_id) {
1705 				err = -EINVAL;
1706 				goto exit;
1707 			}
1708 
1709 			issue_modify_qp = 1;
1710 			iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1711 			iwqp->hte_added = 1;
1712 			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1713 			info.tcp_ctx_valid = true;
1714 			info.ord_valid = true;
1715 			info.arp_cache_idx_valid = true;
1716 			info.cq_num_valid = true;
1717 			break;
1718 		case IB_QPS_SQD:
1719 			if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1720 				err = 0;
1721 				goto exit;
1722 			}
1723 
1724 			if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1725 			    iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1726 				err = 0;
1727 				goto exit;
1728 			}
1729 
1730 			if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1731 				err = -EINVAL;
1732 				goto exit;
1733 			}
1734 
1735 			info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1736 			issue_modify_qp = 1;
1737 			break;
1738 		case IB_QPS_SQE:
1739 			if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1740 				err = -EINVAL;
1741 				goto exit;
1742 			}
1743 
1744 			info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1745 			issue_modify_qp = 1;
1746 			break;
1747 		case IB_QPS_ERR:
1748 		case IB_QPS_RESET:
1749 			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1750 				iwqp->ibqp_state = attr->qp_state;
1751 				spin_unlock_irqrestore(&iwqp->lock, flags);
1752 				if (udata && udata->inlen) {
1753 					if (ib_copy_from_udata(&ureq, udata,
1754 					    min(sizeof(ureq), udata->inlen)))
1755 						return -EINVAL;
1756 
1757 					irdma_flush_wqes(iwqp,
1758 					    (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1759 					    (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1760 					    IRDMA_REFLUSH);
1761 				}
1762 				return 0;
1763 			}
1764 
1765 			if (iwqp->sc_qp.term_flags) {
1766 				spin_unlock_irqrestore(&iwqp->lock, flags);
1767 				irdma_terminate_del_timer(&iwqp->sc_qp);
1768 				spin_lock_irqsave(&iwqp->lock, flags);
1769 			}
1770 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1771 			if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1772 			    iwdev->iw_status &&
1773 			    iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1774 				info.reset_tcp_conn = true;
1775 			else
1776 				dont_wait = 1;
1777 
1778 			issue_modify_qp = 1;
1779 			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1780 			break;
1781 		default:
1782 			err = -EINVAL;
1783 			goto exit;
1784 		}
1785 
1786 		iwqp->ibqp_state = attr->qp_state;
1787 	}
1788 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
1789 		ctx_info->iwarp_info_valid = true;
1790 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1791 			offload_info->wr_rdresp_en = true;
1792 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1793 			offload_info->wr_rdresp_en = true;
1794 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1795 			offload_info->rd_en = true;
1796 	}
1797 
1798 	if (ctx_info->iwarp_info_valid) {
1799 		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1800 		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1801 		irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1802 	}
1803 	spin_unlock_irqrestore(&iwqp->lock, flags);
1804 
1805 	if (attr_mask & IB_QP_STATE) {
1806 		if (issue_modify_qp) {
1807 			ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1808 			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1809 				return -EINVAL;
1810 		}
1811 
1812 		spin_lock_irqsave(&iwqp->lock, flags);
1813 		if (iwqp->iwarp_state == info.curr_iwarp_state) {
1814 			iwqp->iwarp_state = info.next_iwarp_state;
1815 			iwqp->ibqp_state = attr->qp_state;
1816 		}
1817 		spin_unlock_irqrestore(&iwqp->lock, flags);
1818 	}
1819 
1820 	if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1821 		if (dont_wait) {
1822 			if (iwqp->hw_tcp_state) {
1823 				spin_lock_irqsave(&iwqp->lock, flags);
1824 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1825 				iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1826 				spin_unlock_irqrestore(&iwqp->lock, flags);
1827 			}
1828 			irdma_cm_disconn(iwqp);
1829 		} else {
1830 			int close_timer_started;
1831 
1832 			spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1833 
1834 			if (iwqp->cm_node) {
1835 				refcount_inc(&iwqp->cm_node->refcnt);
1836 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1837 				close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1838 				if (iwqp->cm_id && close_timer_started == 1)
1839 					irdma_schedule_cm_timer(iwqp->cm_node,
1840 						(struct irdma_puda_buf *)iwqp,
1841 						IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1842 
1843 				irdma_rem_ref_cm_node(iwqp->cm_node);
1844 			} else {
1845 				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1846 			}
1847 		}
1848 	}
1849 	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1850 	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1851 		struct irdma_ucontext *ucontext;
1852 
1853 		ucontext = rdma_udata_to_drv_context(udata,
1854 					struct irdma_ucontext, ibucontext);
1855 		if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1856 		    !iwqp->push_wqe_mmap_entry &&
1857 		    !irdma_setup_push_mmap_entries(ucontext, iwqp,
1858 			&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1859 			uresp.push_valid = 1;
1860 			uresp.push_offset = iwqp->sc_qp.push_offset;
1861 		}
1862 
1863 		err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1864 				       udata->outlen));
1865 		if (err) {
1866 			irdma_remove_push_mmap_entries(iwqp);
1867 			ibdev_dbg(&iwdev->ibdev,
1868 				  "VERBS: copy_to_udata failed\n");
1869 			return err;
1870 		}
1871 	}
1872 
1873 	return 0;
1874 exit:
1875 	spin_unlock_irqrestore(&iwqp->lock, flags);
1876 
1877 	return err;
1878 }
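
/*
 * Editorial note: in the iWARP path above, the IB verbs states map onto
 * the hardware QP states roughly as INIT/RTR -> IDLE, RTS -> RTS,
 * SQD -> CLOSING, SQE -> TERMINATE and ERR/RESET -> ERROR. When the TCP
 * connection cannot be reset (dont_wait), the disconnect is driven
 * immediately via irdma_cm_disconn(); otherwise a close timer is armed
 * once on the cm_node to bound the teardown.
 */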
1879 
1880 /**
1881  * irdma_srq_free_rsrc - free up resources for srq
1882  * @rf: RDMA PCI function
1883  * @iwsrq: srq ptr
1884  */
1885 static void irdma_srq_free_rsrc(struct irdma_pci_f *rf, struct irdma_srq *iwsrq)
1886 {
1887 	struct irdma_sc_srq *srq = &iwsrq->sc_srq;
1888 
1889 	if (!iwsrq->user_mode) {
1890 		dma_free_coherent(rf->sc_dev.hw->device, iwsrq->kmem.size,
1891 				  iwsrq->kmem.va, iwsrq->kmem.pa);
1892 		iwsrq->kmem.va = NULL;
1893 	}
1894 
1895 	irdma_free_rsrc(rf, rf->allocated_srqs, srq->srq_uk.srq_id);
1896 }
1897 
1898 /**
1899  * irdma_cq_free_rsrc - free up resources for cq
1900  * @rf: RDMA PCI function
1901  * @iwcq: cq ptr
1902  */
1903 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1904 {
1905 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1906 
1907 	if (!iwcq->user_mode) {
1908 		dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1909 				  iwcq->kmem.va, iwcq->kmem.pa);
1910 		iwcq->kmem.va = NULL;
1911 		dma_free_coherent(rf->sc_dev.hw->device,
1912 				  iwcq->kmem_shadow.size,
1913 				  iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1914 		iwcq->kmem_shadow.va = NULL;
1915 	}
1916 
1917 	irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1918 }
1919 
1920 /**
1921  * irdma_free_cqbuf - worker to free a cq buffer
1922  * @work: provides access to the cq buffer to free
1923  */
1924 static void irdma_free_cqbuf(struct work_struct *work)
1925 {
1926 	struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1927 
1928 	dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1929 			  cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1930 	cq_buf->kmem_buf.va = NULL;
1931 	kfree(cq_buf);
1932 }
1933 
1934 /**
1935  * irdma_process_resize_list - remove resized cq buffers from the resize_list
1936  * @iwcq: cq which owns the resize_list
1937  * @iwdev: irdma device
1938  * @lcqe_buf: the buffer where the last cqe is received
1939  */
1940 static int irdma_process_resize_list(struct irdma_cq *iwcq,
1941 				     struct irdma_device *iwdev,
1942 				     struct irdma_cq_buf *lcqe_buf)
1943 {
1944 	struct list_head *tmp_node, *list_node;
1945 	struct irdma_cq_buf *cq_buf;
1946 	int cnt = 0;
1947 
1948 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1949 		cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1950 		if (cq_buf == lcqe_buf)
1951 			return cnt;
1952 
1953 		list_del(&cq_buf->list);
1954 		queue_work(iwdev->cleanup_wq, &cq_buf->work);
1955 		cnt++;
1956 	}
1957 
1958 	return cnt;
1959 }
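
/*
 * Editorial note: buffers unlinked from resize_list are not freed
 * inline; each is queued to iwdev->cleanup_wq and released later in
 * irdma_free_cqbuf() above, which keeps this helper safe to call while
 * holding the CQ spinlock.
 */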
1960 
1961 /**
1962  * irdma_destroy_srq - destroy srq
1963  * @ibsrq: srq pointer
1964  * @udata: user data
1965  */
1966 static int irdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1967 {
1968 	struct irdma_device *iwdev = to_iwdev(ibsrq->device);
1969 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
1970 	struct irdma_sc_srq *srq = &iwsrq->sc_srq;
1971 
1972 	irdma_srq_wq_destroy(iwdev->rf, srq);
1973 	irdma_srq_free_rsrc(iwdev->rf, iwsrq);
1974 	return 0;
1975 }
1976 
1977 /**
1978  * irdma_destroy_cq - destroy cq
1979  * @ib_cq: cq pointer
1980  * @udata: user data
1981  */
1982 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1983 {
1984 	struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1985 	struct irdma_cq *iwcq = to_iwcq(ib_cq);
1986 	struct irdma_sc_cq *cq = &iwcq->sc_cq;
1987 	struct irdma_sc_dev *dev = cq->dev;
1988 	struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1989 	struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1990 	unsigned long flags;
1991 
1992 	spin_lock_irqsave(&iwcq->lock, flags);
1993 	if (!list_empty(&iwcq->cmpl_generated))
1994 		irdma_remove_cmpls_list(iwcq);
1995 	if (!list_empty(&iwcq->resize_list))
1996 		irdma_process_resize_list(iwcq, iwdev, NULL);
1997 	spin_unlock_irqrestore(&iwcq->lock, flags);
1998 
1999 	irdma_cq_rem_ref(ib_cq);
2000 	wait_for_completion(&iwcq->free_cq);
2001 
2002 	irdma_cq_wq_destroy(iwdev->rf, cq);
2003 
2004 	spin_lock_irqsave(&iwceq->ce_lock, flags);
2005 	irdma_sc_cleanup_ceqes(cq, ceq);
2006 	spin_unlock_irqrestore(&iwceq->ce_lock, flags);
2007 	irdma_cq_free_rsrc(iwdev->rf, iwcq);
2008 
2009 	return 0;
2010 }
2011 
2012 /**
2013  * irdma_resize_cq - resize cq
2014  * @ibcq: cq to be resized
2015  * @entries: desired cq size
2016  * @udata: user data
2017  */
2018 static int irdma_resize_cq(struct ib_cq *ibcq, unsigned int entries,
2019 			   struct ib_udata *udata)
2020 {
2021 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
2022 	struct irdma_cq *iwcq = to_iwcq(ibcq);
2023 	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
2024 	struct irdma_cqp_request *cqp_request;
2025 	struct cqp_cmds_info *cqp_info;
2026 	struct irdma_modify_cq_info *m_info;
2027 	struct irdma_modify_cq_info info = {};
2028 	struct irdma_dma_mem kmem_buf;
2029 	struct irdma_cq_mr *cqmr_buf;
2030 	struct irdma_pbl *iwpbl_buf;
2031 	struct irdma_device *iwdev;
2032 	struct irdma_pci_f *rf;
2033 	struct irdma_cq_buf *cq_buf = NULL;
2034 	unsigned long flags;
2035 	u8 cqe_size;
2036 	int ret;
2037 
2038 	iwdev = to_iwdev(ibcq->device);
2039 	rf = iwdev->rf;
2040 
2041 	if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2042 	    IRDMA_FEATURE_CQ_RESIZE))
2043 		return -EOPNOTSUPP;
2044 
2045 	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
2046 		return -EINVAL;
2047 
2048 	if (entries > rf->max_cqe)
2049 		return -EINVAL;
2050 
2051 	if (!iwcq->user_mode) {
2052 		entries += 2;
2053 
2054 		if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
2055 		    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2056 			entries *= 2;
2057 
2058 		if (entries & 1)
2059 			entries += 1; /* cq size must be an even number */
2060 
2061 		cqe_size = iwcq->sc_cq.cq_uk.avoid_mem_cflct ? 64 : 32;
2062 		if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
2063 			entries += 2;
2064 	}
2065 
2066 	info.cq_size = max(entries, 4);
2067 
2068 	if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
2069 		return 0;
2070 
2071 	if (udata) {
2072 		struct irdma_resize_cq_req req = {};
2073 		struct irdma_ucontext *ucontext =
2074 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2075 						  ibucontext);
2076 
2077 		/* CQ resize not supported with legacy GEN_1 libi40iw */
2078 		if (ucontext->legacy_mode)
2079 			return -EOPNOTSUPP;
2080 
2081 		if (ib_copy_from_udata(&req, udata,
2082 				       min(sizeof(req), udata->inlen)))
2083 			return -EINVAL;
2084 
2085 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2086 		iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
2087 					  &ucontext->cq_reg_mem_list);
2088 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2089 
2090 		if (!iwpbl_buf)
2091 			return -ENOMEM;
2092 
2093 		cqmr_buf = &iwpbl_buf->cq_mr;
2094 		if (iwpbl_buf->pbl_allocated) {
2095 			info.virtual_map = true;
2096 			info.pbl_chunk_size = 1;
2097 			info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
2098 		} else {
2099 			info.cq_pa = cqmr_buf->cq_pbl.addr;
2100 		}
2101 	} else {
2102 		/* Kmode CQ resize */
2103 		int rsize;
2104 
2105 		rsize = info.cq_size * sizeof(struct irdma_cqe);
2106 		kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
2107 		kmem_buf.va = dma_alloc_coherent(dev->hw->device,
2108 						 kmem_buf.size, &kmem_buf.pa,
2109 						 GFP_KERNEL);
2110 		if (!kmem_buf.va)
2111 			return -ENOMEM;
2112 
2113 		info.cq_base = kmem_buf.va;
2114 		info.cq_pa = kmem_buf.pa;
2115 		cq_buf = kzalloc_obj(*cq_buf);
2116 		if (!cq_buf) {
2117 			ret = -ENOMEM;
2118 			goto error;
2119 		}
2120 	}
2121 
2122 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2123 	if (!cqp_request) {
2124 		ret = -ENOMEM;
2125 		goto error;
2126 	}
2127 
2128 	info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
2129 	info.cq_resize = true;
2130 
2131 	cqp_info = &cqp_request->info;
2132 	m_info = &cqp_info->in.u.cq_modify.info;
2133 	memcpy(m_info, &info, sizeof(*m_info));
2134 
2135 	cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
2136 	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
2137 	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
2138 	cqp_info->post_sq = 1;
2139 	ret = irdma_handle_cqp_op(rf, cqp_request);
2140 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2141 	if (ret)
2142 		goto error;
2143 
2144 	spin_lock_irqsave(&iwcq->lock, flags);
2145 	if (cq_buf) {
2146 		cq_buf->kmem_buf = iwcq->kmem;
2147 		cq_buf->hw = dev->hw;
2148 		memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
2149 		INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
2150 		list_add_tail(&cq_buf->list, &iwcq->resize_list);
2151 		iwcq->kmem = kmem_buf;
2152 	}
2153 
2154 	irdma_sc_cq_resize(&iwcq->sc_cq, &info);
2155 	ibcq->cqe = info.cq_size - 1;
2156 	spin_unlock_irqrestore(&iwcq->lock, flags);
2157 
2158 	return 0;
2159 error:
2160 	if (!udata) {
2161 		dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
2162 				  kmem_buf.pa);
2163 		kmem_buf.va = NULL;
2164 	}
2165 	kfree(cq_buf);
2166 
2167 	return ret;
2168 }
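
/*
 * Editor's example -- an illustrative sketch, not part of the driver: a
 * kernel consumer reaches irdma_resize_cq() through the core API, and
 * the requested entry count is padded and rounded by the logic above
 * before the CQP modify command is posted:
 *
 *	err = ib_resize_cq(cq, new_nr_cqe);
 *	if (err)
 *		pr_err("CQ resize failed: %d\n", err);
 */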
2169 
2170 /**
2171  * irdma_srq_event - event notification for srq limit
2172  * @srq: shared srq struct
2173  */
2174 void irdma_srq_event(struct irdma_sc_srq *srq)
2175 {
2176 	struct irdma_srq *iwsrq = container_of(srq, struct irdma_srq, sc_srq);
2177 	struct ib_srq *ibsrq = &iwsrq->ibsrq;
2178 	struct ib_event event;
2179 
2180 	srq->srq_limit = 0;
2181 
2182 	if (!ibsrq->event_handler)
2183 		return;
2184 
2185 	event.device = ibsrq->device;
2186 	event.element.port_num = 1;
2187 	event.element.srq = ibsrq;
2188 	event.event = IB_EVENT_SRQ_LIMIT_REACHED;
2189 	ibsrq->event_handler(&event, ibsrq->srq_context);
2190 }
2191 
2192 /**
2193  * irdma_modify_srq - modify srq request
2194  * @ibsrq: srq's pointer for modify
2195  * @attr: srq attributes to modify
2196  * @attr_mask: mask of attributes to modify
2197  * @udata: user data
2198  */
2199 static int irdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
2200 			    enum ib_srq_attr_mask attr_mask,
2201 			    struct ib_udata *udata)
2202 {
2203 	struct irdma_device *iwdev = to_iwdev(ibsrq->device);
2204 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
2205 	struct irdma_cqp_request *cqp_request;
2206 	struct irdma_pci_f *rf = iwdev->rf;
2207 	struct irdma_modify_srq_info *info;
2208 	struct cqp_cmds_info *cqp_info;
2209 	int status;
2210 
2211 	if (attr_mask & IB_SRQ_MAX_WR)
2212 		return -EINVAL;
2213 
2214 	if (!(attr_mask & IB_SRQ_LIMIT))
2215 		return 0;
2216 
2217 	if (attr->srq_limit > iwsrq->sc_srq.srq_uk.srq_size)
2218 		return -EINVAL;
2219 
2220 	/* Execute this cqp op synchronously, so we can update srq_limit
2221 	 * upon successful completion.
2222 	 */
2223 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2224 	if (!cqp_request)
2225 		return -ENOMEM;
2226 
2227 	cqp_info = &cqp_request->info;
2228 	info = &cqp_info->in.u.srq_modify.info;
2229 	info->srq_limit = attr->srq_limit;
2230 	if (info->srq_limit > 0xFFF)
2231 		info->srq_limit = 0xFFF;
2232 	info->arm_limit_event = 1;
2233 
2234 	cqp_info->cqp_cmd = IRDMA_OP_SRQ_MODIFY;
2235 	cqp_info->post_sq = 1;
2236 	cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq;
2237 	cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request;
2238 	status = irdma_handle_cqp_op(rf, cqp_request);
2239 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2240 	if (status)
2241 		return status;
2242 
2243 	iwsrq->sc_srq.srq_limit = info->srq_limit;
2244 
2245 	return 0;
2246 }
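
/*
 * Editorial note: srq_limit is clamped to 12 bits (0xFFF) before being
 * handed to hardware, and arm_limit_event requests the limit event that
 * irdma_srq_event() above turns into IB_EVENT_SRQ_LIMIT_REACHED. An
 * illustrative ULP call (not driver code) to arm the limit:
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *
 *	err = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */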
2247 
2248 static int irdma_setup_umode_srq(struct irdma_device *iwdev,
2249 				 struct irdma_srq *iwsrq,
2250 				 struct irdma_srq_init_info *info,
2251 				 struct ib_udata *udata)
2252 {
2253 #define IRDMA_CREATE_SRQ_MIN_REQ_LEN \
2254 	offsetofend(struct irdma_create_srq_req, user_shadow_area)
2255 	struct irdma_create_srq_req req = {};
2256 	struct irdma_ucontext *ucontext;
2257 	struct irdma_srq_mr *srqmr;
2258 	struct irdma_pbl *iwpbl;
2259 	unsigned long flags;
2260 
2261 	iwsrq->user_mode = true;
2262 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2263 					     ibucontext);
2264 
2265 	if (udata->inlen < IRDMA_CREATE_SRQ_MIN_REQ_LEN)
2266 		return -EINVAL;
2267 
2268 	if (ib_copy_from_udata(&req, udata,
2269 			       min(sizeof(req), udata->inlen)))
2270 		return -EFAULT;
2271 
2272 	spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
2273 	iwpbl = irdma_get_pbl((unsigned long)req.user_srq_buf,
2274 			      &ucontext->srq_reg_mem_list);
2275 	spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
2276 	if (!iwpbl)
2277 		return -EPROTO;
2278 
2279 	iwsrq->iwpbl = iwpbl;
2280 	srqmr = &iwpbl->srq_mr;
2281 
2282 	if (iwpbl->pbl_allocated) {
2283 		info->virtual_map = true;
2284 		info->pbl_chunk_size = 1;
2285 		info->first_pm_pbl_idx = srqmr->srq_pbl.idx;
2286 		info->leaf_pbl_size = 1;
2287 	} else {
2288 		info->srq_pa = srqmr->srq_pbl.addr;
2289 	}
2290 	info->shadow_area_pa = srqmr->shadow;
2291 
2292 	return 0;
2293 }
2294 
2295 static int irdma_setup_kmode_srq(struct irdma_device *iwdev,
2296 				 struct irdma_srq *iwsrq,
2297 				 struct irdma_srq_init_info *info, u32 depth,
2298 				 u8 shift)
2299 {
2300 	struct irdma_srq_uk_init_info *ukinfo = &info->srq_uk_init_info;
2301 	struct irdma_dma_mem *mem = &iwsrq->kmem;
2302 	u32 size, ring_size;
2303 
2304 	ring_size = depth * IRDMA_QP_WQE_MIN_SIZE;
2305 	size = ring_size + (IRDMA_SHADOW_AREA_SIZE << 3);
2306 
2307 	mem->size = ALIGN(size, 256);
2308 	mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
2309 				     &mem->pa, GFP_KERNEL);
2310 	if (!mem->va)
2311 		return -ENOMEM;
2312 
2313 	ukinfo->srq = mem->va;
2314 	ukinfo->srq_size = depth >> shift;
2315 	ukinfo->shadow_area = mem->va + ring_size;
2316 
2317 	info->srq_pa = mem->pa;
2318 	info->shadow_area_pa = info->srq_pa + ring_size;
2319 
2320 	return 0;
2321 }
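
/*
 * Editorial note: the kernel-mode SRQ is a single coherent allocation
 * laid out as the WQE ring followed by the shadow area, hence
 * shadow_area_pa = srq_pa + ring_size. depth counts minimum-size WQE
 * slots, so depth >> shift converts it back to the WR capacity stored
 * in srq_size.
 */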
2322 
2323 /**
2324  * irdma_create_srq - create srq
2325  * @ibsrq: ib's srq pointer
2326  * @initattrs: attributes for srq
2327  * @udata: user data for create srq
2328  */
2329 static int irdma_create_srq(struct ib_srq *ibsrq,
2330 			    struct ib_srq_init_attr *initattrs,
2331 			    struct ib_udata *udata)
2332 {
2333 	struct irdma_device *iwdev = to_iwdev(ibsrq->device);
2334 	struct ib_srq_attr *attr = &initattrs->attr;
2335 	struct irdma_pd *iwpd = to_iwpd(ibsrq->pd);
2336 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
2337 	struct irdma_srq_uk_init_info *ukinfo;
2338 	struct irdma_cqp_request *cqp_request;
2339 	struct irdma_srq_init_info info = {};
2340 	struct irdma_pci_f *rf = iwdev->rf;
2341 	struct irdma_uk_attrs *uk_attrs;
2342 	struct cqp_cmds_info *cqp_info;
2343 	int err_code = 0;
2344 	u32 depth;
2345 	u8 shift;
2346 
2347 	uk_attrs = &rf->sc_dev.hw_attrs.uk_attrs;
2348 	ukinfo = &info.srq_uk_init_info;
2349 
2350 	if (initattrs->srq_type != IB_SRQT_BASIC)
2351 		return -EOPNOTSUPP;
2352 
2353 	if (!(uk_attrs->feature_flags & IRDMA_FEATURE_SRQ) ||
2354 	    attr->max_sge > uk_attrs->max_hw_wq_frags)
2355 		return -EINVAL;
2356 
2357 	refcount_set(&iwsrq->refcnt, 1);
2358 	spin_lock_init(&iwsrq->lock);
2359 	err_code = irdma_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq,
2360 				    &iwsrq->srq_num, &rf->next_srq);
2361 	if (err_code)
2362 		return err_code;
2363 
2364 	ukinfo->max_srq_frag_cnt = attr->max_sge;
2365 	ukinfo->uk_attrs = uk_attrs;
2366 	ukinfo->srq_id = iwsrq->srq_num;
2367 
2368 	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_srq_frag_cnt, 0,
2369 			    &shift);
2370 
2371 	err_code = irdma_get_srqdepth(ukinfo->uk_attrs, attr->max_wr,
2372 				      shift, &depth);
2373 	if (err_code)
2374 		return err_code;
2375 
2376 	/* Actual SRQ size in WRs for ring and HW */
2377 	ukinfo->srq_size = depth >> shift;
2378 
2379 	/* Max postable WRs to SRQ */
2380 	iwsrq->max_wr = (depth - IRDMA_RQ_RSVD) >> shift;
2381 	attr->max_wr = iwsrq->max_wr;
2382 
2383 	if (udata)
2384 		err_code = irdma_setup_umode_srq(iwdev, iwsrq, &info, udata);
2385 	else
2386 		err_code = irdma_setup_kmode_srq(iwdev, iwsrq, &info, depth,
2387 						 shift);
2388 
2389 	if (err_code)
2390 		goto free_rsrc;
2391 
2392 	info.vsi = &iwdev->vsi;
2393 	info.pd = &iwpd->sc_pd;
2394 
2395 	iwsrq->sc_srq.srq_uk.lock = &iwsrq->lock;
2396 	err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info);
2397 	if (err_code)
2398 		goto free_dmem;
2399 
2400 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2401 	if (!cqp_request) {
2402 		err_code = -ENOMEM;
2403 		goto free_dmem;
2404 	}
2405 
2406 	cqp_info = &cqp_request->info;
2407 	cqp_info->cqp_cmd = IRDMA_OP_SRQ_CREATE;
2408 	cqp_info->post_sq = 1;
2409 	cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq;
2410 	cqp_info->in.u.srq_create.scratch = (uintptr_t)cqp_request;
2411 	err_code = irdma_handle_cqp_op(rf, cqp_request);
2412 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2413 	if (err_code)
2414 		goto free_dmem;
2415 
2416 	if (udata) {
2417 		struct irdma_create_srq_resp resp = {};
2418 
2419 		resp.srq_id = iwsrq->srq_num;
2420 		resp.srq_size = ukinfo->srq_size;
2421 		if (ib_copy_to_udata(udata, &resp,
2422 				     min(sizeof(resp), udata->outlen))) {
2423 			err_code = -EPROTO;
2424 			goto srq_destroy;
2425 		}
2426 	}
2427 
2428 	return 0;
2429 
2430 srq_destroy:
2431 	irdma_srq_wq_destroy(rf, &iwsrq->sc_srq);
2432 
2433 free_dmem:
2434 	if (!iwsrq->user_mode)
2435 		dma_free_coherent(rf->hw.device, iwsrq->kmem.size,
2436 				  iwsrq->kmem.va, iwsrq->kmem.pa);
2437 free_rsrc:
2438 	irdma_free_rsrc(rf, rf->allocated_srqs, iwsrq->srq_num);
2439 	return err_code;
2440 }
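
/*
 * Editorial note: the unwind above mirrors creation order -- a failed
 * copy to user space destroys the hardware SRQ (srq_destroy), kernel
 * DMA memory is released under free_dmem, and the SRQ index returns to
 * the resource bitmap last (free_rsrc).
 */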
2441 
2442 /**
2443  * irdma_query_srq - get SRQ attributes
2444  * @ibsrq: the SRQ to query
2445  * @attr: the attributes of the SRQ
2446  */
2447 static int irdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
2448 {
2449 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
2450 
2451 	attr->max_wr = iwsrq->max_wr;
2452 	attr->max_sge = iwsrq->sc_srq.srq_uk.max_srq_frag_cnt;
2453 	attr->srq_limit = iwsrq->sc_srq.srq_limit;
2454 
2455 	return 0;
2456 }
2457 
2458 static inline int cq_validate_flags(u32 flags, u8 hw_rev)
2459 {
2460 	/* GEN1/2 does not support CQ create flags */
2461 	if (hw_rev <= IRDMA_GEN_2)
2462 		return flags ? -EOPNOTSUPP : 0;
2463 
2464 	return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
2465 }
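
/*
 * Editorial note: on GEN3 and later hardware the only CQ create flag
 * accepted is IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION; earlier
 * generations reject any flag.
 */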
2466 
2467 /**
2468  * irdma_create_cq - create cq
2469  * @ibcq: CQ allocated
2470  * @attr: attributes for cq
2471  * @attrs: uverbs attribute bundle
2472  */
2473 static int irdma_create_cq(struct ib_cq *ibcq,
2474 			   const struct ib_cq_init_attr *attr,
2475 			   struct uverbs_attr_bundle *attrs)
2476 {
2477 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
2478 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
2479 	struct ib_udata *udata = &attrs->driver_udata;
2480 	struct ib_device *ibdev = ibcq->device;
2481 	struct irdma_device *iwdev = to_iwdev(ibdev);
2482 	struct irdma_pci_f *rf = iwdev->rf;
2483 	struct irdma_cq *iwcq = to_iwcq(ibcq);
2484 	u32 cq_num = 0;
2485 	struct irdma_sc_cq *cq;
2486 	struct irdma_sc_dev *dev = &rf->sc_dev;
2487 	struct irdma_cq_init_info info = {};
2488 	struct irdma_cqp_request *cqp_request;
2489 	struct cqp_cmds_info *cqp_info;
2490 	struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
2491 	unsigned long flags;
2492 	int err_code;
2493 	int entries = attr->cqe;
2494 	bool cqe_64byte_ena;
2495 	u8 cqe_size;
2496 
2497 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
2498 	if (err_code)
2499 		return err_code;
2500 
2501 	if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
2502 		      udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
2503 		return -EINVAL;
2504 
2505 	err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
2506 				    &rf->next_cq);
2507 	if (err_code)
2508 		return err_code;
2509 
2510 	cq = &iwcq->sc_cq;
2511 	cq->back_cq = iwcq;
2512 	refcount_set(&iwcq->refcnt, 1);
2513 	spin_lock_init(&iwcq->lock);
2514 	INIT_LIST_HEAD(&iwcq->resize_list);
2515 	INIT_LIST_HEAD(&iwcq->cmpl_generated);
2516 	iwcq->cq_num = cq_num;
2517 	info.dev = dev;
2518 	ukinfo->cq_size = max(entries, 4);
2519 	ukinfo->cq_id = cq_num;
2520 	cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE ?
2521 			 true : false;
2522 	cqe_size = cqe_64byte_ena ? 64 : 32;
2523 	ukinfo->avoid_mem_cflct = cqe_64byte_ena;
2524 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
2525 	if (attr->comp_vector < rf->ceqs_count)
2526 		info.ceq_id = attr->comp_vector;
2527 	info.ceq_id_valid = true;
2528 	info.ceqe_mask = 1;
2529 	info.type = IRDMA_CQ_TYPE_IWARP;
2530 	info.vsi = &iwdev->vsi;
2531 
2532 	if (udata) {
2533 		struct irdma_ucontext *ucontext;
2534 		struct irdma_create_cq_req req = {};
2535 		struct irdma_cq_mr *cqmr;
2536 		struct irdma_pbl *iwpbl;
2537 		struct irdma_pbl *iwpbl_shadow;
2538 		struct irdma_cq_mr *cqmr_shadow;
2539 
2540 		iwcq->user_mode = true;
2541 		ucontext =
2542 			rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2543 						  ibucontext);
2544 		if (ib_copy_from_udata(&req, udata,
2545 				       min(sizeof(req), udata->inlen))) {
2546 			err_code = -EFAULT;
2547 			goto cq_free_rsrc;
2548 		}
2549 
2550 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2551 		iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
2552 				      &ucontext->cq_reg_mem_list);
2553 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2554 		if (!iwpbl) {
2555 			err_code = -EPROTO;
2556 			goto cq_free_rsrc;
2557 		}
2558 
2559 		cqmr = &iwpbl->cq_mr;
2560 
2561 		if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2562 		    IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2563 			spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2564 			iwpbl_shadow = irdma_get_pbl(
2565 					(unsigned long)req.user_shadow_area,
2566 					&ucontext->cq_reg_mem_list);
2567 			spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2568 
2569 			if (!iwpbl_shadow) {
2570 				err_code = -EPROTO;
2571 				goto cq_free_rsrc;
2572 			}
2573 			cqmr_shadow = &iwpbl_shadow->cq_mr;
2574 			info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2575 			cqmr->split = true;
2576 		} else {
2577 			info.shadow_area_pa = cqmr->shadow;
2578 		}
2579 		if (iwpbl->pbl_allocated) {
2580 			info.virtual_map = true;
2581 			info.pbl_chunk_size = 1;
2582 			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2583 		} else {
2584 			info.cq_base_pa = cqmr->cq_pbl.addr;
2585 		}
2586 	} else {
2587 		/* Kmode allocations */
2588 		int rsize;
2589 
2590 		if (entries < 1 || entries > rf->max_cqe) {
2591 			err_code = -EINVAL;
2592 			goto cq_free_rsrc;
2593 		}
2594 
2595 		entries += 2;
2596 		if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2597 			entries *= 2;
2598 
2599 		if (entries & 1)
2600 			entries += 1; /* cq size must be an even number */
2601 
2602 		if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
2603 			entries += 2;
2604 
2605 		ukinfo->cq_size = entries;
2606 
2607 		if (cqe_64byte_ena)
2608 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
2609 		else
2610 			rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2611 		iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2612 		iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2613 						   iwcq->kmem.size,
2614 						   &iwcq->kmem.pa, GFP_KERNEL);
2615 		if (!iwcq->kmem.va) {
2616 			err_code = -ENOMEM;
2617 			goto cq_free_rsrc;
2618 		}
2619 
2620 		iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2621 					       64);
2622 		iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2623 							  iwcq->kmem_shadow.size,
2624 							  &iwcq->kmem_shadow.pa,
2625 							  GFP_KERNEL);
2626 		if (!iwcq->kmem_shadow.va) {
2627 			err_code = -ENOMEM;
2628 			goto cq_free_rsrc;
2629 		}
2630 		info.shadow_area_pa = iwcq->kmem_shadow.pa;
2631 		ukinfo->shadow_area = iwcq->kmem_shadow.va;
2632 		ukinfo->cq_base = iwcq->kmem.va;
2633 		info.cq_base_pa = iwcq->kmem.pa;
2634 	}
2635 
2636 	info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2637 					 (u32)IRDMA_MAX_CQ_READ_THRESH);
2638 
2639 	if (irdma_sc_cq_init(cq, &info)) {
2640 		ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2641 		err_code = -EPROTO;
2642 		goto cq_free_rsrc;
2643 	}
2644 
2645 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2646 	if (!cqp_request) {
2647 		err_code = -ENOMEM;
2648 		goto cq_free_rsrc;
2649 	}
2650 
2651 	cqp_info = &cqp_request->info;
2652 	cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2653 	cqp_info->post_sq = 1;
2654 	cqp_info->in.u.cq_create.cq = cq;
2655 	cqp_info->in.u.cq_create.check_overflow = true;
2656 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2657 	err_code = irdma_handle_cqp_op(rf, cqp_request);
2658 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2659 	if (err_code)
2660 		goto cq_free_rsrc;
2661 
2662 	if (udata) {
2663 		struct irdma_create_cq_resp resp = {};
2664 
2665 		resp.cq_id = info.cq_uk_init_info.cq_id;
2666 		resp.cq_size = info.cq_uk_init_info.cq_size;
2667 		if (ib_copy_to_udata(udata, &resp,
2668 				     min(sizeof(resp), udata->outlen))) {
2669 			ibdev_dbg(&iwdev->ibdev,
2670 				  "VERBS: copy to user data\n");
2671 			err_code = -EPROTO;
2672 			goto cq_destroy;
2673 		}
2674 	}
2675 
2676 	init_completion(&iwcq->free_cq);
2677 
2678 	/* Populate table entry after CQ is fully created. */
2679 	smp_store_release(&rf->cq_table[cq_num], iwcq);
2680 
2681 	return 0;
2682 cq_destroy:
2683 	irdma_cq_wq_destroy(rf, cq);
2684 cq_free_rsrc:
2685 	irdma_cq_free_rsrc(rf, iwcq);
2686 
2687 	return err_code;
2688 }
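
/*
 * Editorial note: the smp_store_release() above publishes the table
 * entry only after the CQ is fully initialized; it presumably pairs
 * with an acquire load (or stronger ordering) on the lookup side, so a
 * reader that sees the pointer also sees a complete CQ.
 */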
2689 
2690 /**
2691  * irdma_get_mr_access - get hw MR access permissions from IB access flags
2692  * @access: IB access flags
2693  * @hw_rev: Hardware version
2694  */
2695 static inline u16 irdma_get_mr_access(int access, u8 hw_rev)
2696 {
2697 	u16 hw_access = 0;
2698 
2699 	hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2700 		     IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2701 	hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2702 		     IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2703 	hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2704 		     IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2705 	if (hw_rev >= IRDMA_GEN_3) {
2706 		hw_access |= (access & IB_ACCESS_MW_BIND) ?
2707 			     IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2708 	}
2709 	hw_access |= (access & IB_ZERO_BASED) ?
2710 		     IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
2711 	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
2712 
2713 	return hw_access;
2714 }
2715 
2716 /**
2717  * irdma_free_stag - free stag resource
2718  * @iwdev: irdma device
2719  * @stag: stag to free
2720  */
2721 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2722 {
2723 	u32 stag_idx;
2724 
2725 	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2726 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2727 }
2728 
2729 /**
2730  * irdma_create_stag - create random stag
2731  * @iwdev: irdma device
2732  */
2733 static u32 irdma_create_stag(struct irdma_device *iwdev)
2734 {
2735 	u32 stag = 0;
2736 	u32 stag_index = 0;
2737 	u32 next_stag_index;
2738 	u32 driver_key;
2739 	u32 random;
2740 	u8 consumer_key;
2741 	int ret;
2742 
2743 	get_random_bytes(&random, sizeof(random));
2744 	consumer_key = (u8)random;
2745 
2746 	driver_key = random & ~iwdev->rf->mr_stagmask;
2747 	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2748 	next_stag_index %= iwdev->rf->max_mr;
2749 
2750 	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2751 			       iwdev->rf->max_mr, &stag_index,
2752 			       &next_stag_index);
2753 	if (ret)
2754 		return stag;
2755 	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2756 	stag |= driver_key;
2757 	stag += (u32)consumer_key;
2758 
2759 	return stag;
2760 }
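
/*
 * Editorial example of the stag layout built above, assuming the usual
 * IRDMA_CQPSQ_STAG_IDX_S shift of 8 (the constant is defined elsewhere):
 * the low 8 bits carry the random consumer key, the bits under
 * mr_stagmask carry the allocated index, and the bits outside the mask
 * carry the random driver key:
 *
 *	stag = (stag_index << 8) | driver_key | consumer_key;
 */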
2761 
2762 /**
2763  * irdma_next_pbl_addr - Get next pbl address
2764  * @pbl: pointer to a pble
2765  * @pinfo: info pointer
2766  * @idx: index
2767  */
2768 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2769 				       u32 *idx)
2770 {
2771 	*idx += 1;
2772 	if (!(*pinfo) || *idx != (*pinfo)->cnt)
2773 		return ++pbl;
2774 	*idx = 0;
2775 	(*pinfo)++;
2776 
2777 	return (*pinfo)->addr;
2778 }
2779 
2780 /**
2781  * irdma_copy_user_pgaddrs - copy user page addresses to pbles locally
2782  * @iwmr: iwmr for IB's user page addresses
2783  * @pbl: pble pointer to save 1 level or 0 level pble
2784  * @level: indicates level 0, 1 or 2
2785  */
2786 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2787 				    enum irdma_pble_level level)
2788 {
2789 	struct ib_umem *region = iwmr->region;
2790 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2791 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2792 	struct irdma_pble_info *pinfo;
2793 	struct ib_block_iter biter;
2794 	u32 idx = 0;
2795 	u32 pbl_cnt = 0;
2796 
2797 	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2798 
2799 	if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2800 		iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2801 
2802 	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2803 		*pbl = rdma_block_iter_dma_address(&biter);
2804 		if (++pbl_cnt == palloc->total_cnt)
2805 			break;
2806 		pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
2807 	}
2808 }
2809 
2810 /**
2811  * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
2812  * @arr: lvl1 pbl array
2813  * @npages: page count
2814  * @pg_size: page size
2815  *
2816  */
2817 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2818 {
2819 	u32 pg_idx;
2820 
2821 	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2822 		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
2823 			return false;
2824 	}
2825 
2826 	return true;
2827 }
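
/*
 * Editorial example: with pg_size = 0x1000 the array
 * { 0x10000, 0x11000, 0x12000 } passes the check above
 * (arr[0] + pg_size * i == arr[i] for every i), while
 * { 0x10000, 0x12000, 0x13000 } fails at index 1.
 */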
2828 
2829 /**
2830  * irdma_check_mr_contiguous - check if MR is physically contiguous
2831  * @palloc: pbl allocation struct
2832  * @pg_size: page size
2833  */
2834 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2835 				      u32 pg_size)
2836 {
2837 	struct irdma_pble_level2 *lvl2 = &palloc->level2;
2838 	struct irdma_pble_info *leaf = lvl2->leaf;
2839 	u64 *arr = NULL;
2840 	u64 *start_addr = NULL;
2841 	int i;
2842 	bool ret;
2843 
2844 	if (palloc->level == PBLE_LEVEL_1) {
2845 		arr = palloc->level1.addr;
2846 		ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2847 						 pg_size);
2848 		return ret;
2849 	}
2850 
2851 	start_addr = leaf->addr;
2852 
2853 	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2854 		arr = leaf->addr;
2855 		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2856 			return false;
2857 		ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2858 		if (!ret)
2859 			return false;
2860 	}
2861 
2862 	return true;
2863 }
2864 
2865 /**
2866  * irdma_setup_pbles - copy user page addresses to pbles
2867  * @rf: RDMA PCI function
2868  * @iwmr: mr pointer for this memory registration
2869  * @lvl: requested pble levels
2870  */
2871 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2872 			     u8 lvl)
2873 {
2874 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2875 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2876 	struct irdma_pble_info *pinfo;
2877 	u64 *pbl;
2878 	int status;
2879 	enum irdma_pble_level level = PBLE_LEVEL_1;
2880 
2881 	if (lvl) {
2882 		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2883 					lvl);
2884 		if (status)
2885 			return status;
2886 
2887 		iwpbl->pbl_allocated = true;
2888 		level = palloc->level;
2889 		pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2890 						  palloc->level2.leaf;
2891 		pbl = pinfo->addr;
2892 	} else {
2893 		pbl = iwmr->pgaddrmem;
2894 	}
2895 
2896 	irdma_copy_user_pgaddrs(iwmr, pbl, level);
2897 
2898 	if (lvl)
2899 		iwmr->pgaddrmem[0] = *pbl;
2900 
2901 	return 0;
2902 }
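
/*
 * Editorial note: with lvl == 0 the page addresses fit in the inline
 * iwmr->pgaddrmem array and no PBLE backing is needed; otherwise the
 * PBLE pool picks a level-1 or level-2 layout and pbl_allocated is set
 * so the error-unwind paths know to free it.
 */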
2903 
2904 /**
2905  * irdma_handle_q_mem - handle memory for qp and cq
2906  * @iwdev: irdma device
2907  * @req: information for q memory management
2908  * @iwpbl: pble struct
2909  * @lvl: pble level mask
2910  */
2911 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2912 			      struct irdma_mem_reg_req *req,
2913 			      struct irdma_pbl *iwpbl, u8 lvl)
2914 {
2915 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2916 	struct irdma_mr *iwmr = iwpbl->iwmr;
2917 	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2918 	struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2919 	struct irdma_srq_mr *srqmr = &iwpbl->srq_mr;
2920 	struct irdma_hmc_pble *hmc_p;
2921 	u64 *arr = iwmr->pgaddrmem;
2922 	u32 pg_size, total;
2923 	int err = 0;
2924 	bool ret = true;
2925 
2926 	pg_size = iwmr->page_size;
2927 	err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2928 	if (err)
2929 		return err;
2930 
2931 	if (lvl)
2932 		arr = palloc->level1.addr;
2933 
2934 	switch (iwmr->type) {
2935 	case IRDMA_MEMREG_TYPE_QP:
2936 		total = req->sq_pages + req->rq_pages;
2937 		hmc_p = &qpmr->sq_pbl;
2938 		qpmr->shadow = (dma_addr_t)arr[total];
2939 		/* Need to use physical address for RQ of QP
2940 		 * in case it is associated with SRQ.
2941 		 */
2942 		qpmr->rq_pa = (dma_addr_t)arr[req->sq_pages];
2943 		if (lvl) {
2944 			ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2945 							 pg_size);
2946 			if (ret)
2947 				ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2948 								 req->rq_pages,
2949 								 pg_size);
2950 		}
2951 
2952 		if (!ret) {
2953 			hmc_p->idx = palloc->level1.idx;
2954 			hmc_p = &qpmr->rq_pbl;
2955 			hmc_p->idx = palloc->level1.idx + req->sq_pages;
2956 		} else {
2957 			hmc_p->addr = arr[0];
2958 			hmc_p = &qpmr->rq_pbl;
2959 			hmc_p->addr = arr[req->sq_pages];
2960 		}
2961 		break;
2962 	case IRDMA_MEMREG_TYPE_SRQ:
2963 		hmc_p = &srqmr->srq_pbl;
2964 		srqmr->shadow = (dma_addr_t)arr[req->rq_pages];
2965 		if (lvl)
2966 			ret = irdma_check_mem_contiguous(arr, req->rq_pages,
2967 							 pg_size);
2968 
2969 		if (!ret)
2970 			hmc_p->idx = palloc->level1.idx;
2971 		else
2972 			hmc_p->addr = arr[0];
2973 		break;
2974 	case IRDMA_MEMREG_TYPE_CQ:
2975 		hmc_p = &cqmr->cq_pbl;
2976 
2977 		if (!cqmr->split)
2978 			cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2979 
2980 		if (lvl)
2981 			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2982 							 pg_size);
2983 
2984 		if (!ret)
2985 			hmc_p->idx = palloc->level1.idx;
2986 		else
2987 			hmc_p->addr = arr[0];
2988 		break;
2989 	default:
2990 		ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2991 		err = -EINVAL;
2992 	}
2993 
2994 	if (lvl && ret) {
2995 		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2996 		iwpbl->pbl_allocated = false;
2997 	}
2998 
2999 	return err;
3000 }
3001 
3002 /**
3003  * irdma_hw_alloc_mw - create the hw memory window
3004  * @iwdev: irdma device
3005  * @iwmr: pointer to memory window info
3006  */
3007 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
3008 {
3009 	struct irdma_mw_alloc_info *info;
3010 	struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
3011 	struct irdma_cqp_request *cqp_request;
3012 	struct cqp_cmds_info *cqp_info;
3013 	int status;
3014 
3015 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3016 	if (!cqp_request)
3017 		return -ENOMEM;
3018 
3019 	cqp_info = &cqp_request->info;
3020 	info = &cqp_info->in.u.mw_alloc.info;
3021 	memset(info, 0, sizeof(*info));
3022 	if (iwmr->ibmw.type == IB_MW_TYPE_1)
3023 		info->mw_wide = true;
3024 
3025 	info->page_size = PAGE_SIZE;
3026 	info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
3027 	info->pd_id = iwpd->sc_pd.pd_id;
3028 	info->remote_access = true;
3029 	cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
3030 	cqp_info->post_sq = 1;
3031 	cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
3032 	cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
3033 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3034 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3035 
3036 	return status;
3037 }
3038 
3039 /**
3040  * irdma_alloc_mw - Allocate memory window
3041  * @ibmw: Memory Window
3042  * @udata: user data pointer
3043  */
3044 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
3045 {
3046 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
3047 	struct irdma_mr *iwmr = to_iwmw(ibmw);
3048 	int err_code;
3049 	u32 stag;
3050 
3051 	stag = irdma_create_stag(iwdev);
3052 	if (!stag)
3053 		return -ENOMEM;
3054 
3055 	iwmr->stag = stag;
3056 	ibmw->rkey = stag;
3057 
3058 	err_code = irdma_hw_alloc_mw(iwdev, iwmr);
3059 	if (err_code) {
3060 		irdma_free_stag(iwdev, stag);
3061 		return err_code;
3062 	}
3063 
3064 	return 0;
3065 }
3066 
3067 /**
3068  * irdma_dealloc_mw - Dealloc memory window
3069  * @ibmw: memory window structure.
3070  */
3071 static int irdma_dealloc_mw(struct ib_mw *ibmw)
3072 {
3073 	struct ib_pd *ibpd = ibmw->pd;
3074 	struct irdma_pd *iwpd = to_iwpd(ibpd);
3075 	struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
3076 	struct irdma_device *iwdev = to_iwdev(ibmw->device);
3077 	struct irdma_cqp_request *cqp_request;
3078 	struct cqp_cmds_info *cqp_info;
3079 	struct irdma_dealloc_stag_info *info;
3080 
3081 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3082 	if (!cqp_request)
3083 		return -ENOMEM;
3084 
3085 	cqp_info = &cqp_request->info;
3086 	info = &cqp_info->in.u.dealloc_stag.info;
3087 	memset(info, 0, sizeof(*info));
3088 	info->pd_id = iwpd->sc_pd.pd_id;
3089 	info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3090 	info->mr = false;
3091 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3092 	cqp_info->post_sq = 1;
3093 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3094 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3095 	irdma_handle_cqp_op(iwdev->rf, cqp_request);
3096 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3097 	irdma_free_stag(iwdev, iwmr->stag);
3098 
3099 	return 0;
3100 }
3101 
3102 /**
3103  * irdma_hw_alloc_stag - cqp command to allocate stag
3104  * @iwdev: irdma device
3105  * @iwmr: irdma mr pointer
3106  */
3107 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
3108 			       struct irdma_mr *iwmr)
3109 {
3110 	struct irdma_allocate_stag_info *info;
3111 	struct ib_pd *pd = iwmr->ibmr.pd;
3112 	struct irdma_pd *iwpd = to_iwpd(pd);
3113 	int status;
3114 	struct irdma_cqp_request *cqp_request;
3115 	struct cqp_cmds_info *cqp_info;
3116 
3117 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3118 	if (!cqp_request)
3119 		return -ENOMEM;
3120 
3121 	cqp_info = &cqp_request->info;
3122 	info = &cqp_info->in.u.alloc_stag.info;
3123 	info->page_size = PAGE_SIZE;
3124 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
3125 	info->pd_id = iwpd->sc_pd.pd_id;
3126 	info->total_len = iwmr->len;
3127 	info->remote_access = true;
3128 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
3129 	cqp_info->post_sq = 1;
3130 	cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
3131 	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
3132 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3133 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3134 	if (status)
3135 		return status;
3136 
3137 	iwmr->is_hwreg = true;
3138 	return 0;
3139 }
3140 
3141 /**
3142  * irdma_alloc_mr - register stag for fast memory registration
3143  * @pd: ibpd pointer
3144  * @mr_type: memory for stag registration
3145  * @max_num_sg: max number of pages
3146  */
3147 static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
3148 				    u32 max_num_sg)
3149 {
3150 	struct irdma_device *iwdev = to_iwdev(pd->device);
3151 	struct irdma_pble_alloc *palloc;
3152 	struct irdma_pbl *iwpbl;
3153 	struct irdma_mr *iwmr;
3154 	u32 stag;
3155 	int err_code;
3156 
3157 	iwmr = kzalloc_obj(*iwmr);
3158 	if (!iwmr)
3159 		return ERR_PTR(-ENOMEM);
3160 
3161 	stag = irdma_create_stag(iwdev);
3162 	if (!stag) {
3163 		err_code = -ENOMEM;
3164 		goto err;
3165 	}
3166 
3167 	iwmr->stag = stag;
3168 	iwmr->ibmr.rkey = stag;
3169 	iwmr->ibmr.lkey = stag;
3170 	iwmr->ibmr.pd = pd;
3171 	iwmr->ibmr.device = pd->device;
3172 	iwpbl = &iwmr->iwpbl;
3173 	iwpbl->iwmr = iwmr;
3174 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
3175 	palloc = &iwpbl->pble_alloc;
3176 	iwmr->page_cnt = max_num_sg;
3177 	/* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
3178 	iwmr->len = max_num_sg * PAGE_SIZE;
3179 	err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
3180 				  false);
3181 	if (err_code)
3182 		goto err_get_pble;
3183 
3184 	err_code = irdma_hw_alloc_stag(iwdev, iwmr);
3185 	if (err_code)
3186 		goto err_alloc_stag;
3187 
3188 	iwpbl->pbl_allocated = true;
3189 
3190 	return &iwmr->ibmr;
3191 err_alloc_stag:
3192 	irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
3193 err_get_pble:
3194 	irdma_free_stag(iwdev, stag);
3195 err:
3196 	kfree(iwmr);
3197 
3198 	return ERR_PTR(err_code);
3199 }
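
/*
 * Editor's example -- an illustrative sketch, not part of the driver:
 * fast-reg MRs allocated here are typically used by kernel ULPs as
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 256);
 *	int n;
 *
 *	if (!IS_ERR(mr)) {
 *		n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 *		... then post an IB_WR_REG_MR work request to activate
 *		the new rkey before using it in RDMA operations.
 *	}
 */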
3200 
3201 /**
3202  * irdma_set_page - populate pbl list for fmr
3203  * @ibmr: ib mem to access iwarp mr pointer
3204  * @addr: page dma address for pbl list
3205  */
3206 static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
3207 {
3208 	struct irdma_mr *iwmr = to_iwmr(ibmr);
3209 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3210 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3211 	u64 *pbl;
3212 
3213 	if (unlikely(iwmr->npages == iwmr->page_cnt))
3214 		return -ENOMEM;
3215 
3216 	if (palloc->level == PBLE_LEVEL_2) {
3217 		struct irdma_pble_info *palloc_info =
3218 			palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
3219 
3220 		palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
3221 	} else {
3222 		pbl = palloc->level1.addr;
3223 		pbl[iwmr->npages] = addr;
3224 	}
3225 	iwmr->npages++;
3226 
3227 	return 0;
3228 }
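
/*
 * Editorial note, assuming the usual 4K PBLE page of 512 8-byte entries
 * (PBLE_PER_PAGE == 512, PBLE_512_SHIFT == 9): for a level-2 allocation
 * npages >> PBLE_512_SHIFT selects the leaf page and
 * npages & (PBLE_PER_PAGE - 1) indexes within it; a level-1 allocation
 * is one flat array indexed by npages directly.
 */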
3229 
3230 /**
3231  * irdma_map_mr_sg - map a scatter-gather list for fmr
3232  * @ibmr: ib mem to access iwarp mr pointer
3233  * @sg: scatter gather list
3234  * @sg_nents: number of sg entries
3235  * @sg_offset: offset into the first sg entry
3236  */
3237 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3238 			   int sg_nents, unsigned int *sg_offset)
3239 {
3240 	struct irdma_mr *iwmr = to_iwmr(ibmr);
3241 
3242 	iwmr->npages = 0;
3243 
3244 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
3245 }
3246 
3247 /**
3248  * irdma_hwreg_mr - send cqp command for memory registration
3249  * @iwdev: irdma device
3250  * @iwmr: irdma mr pointer
3251  * @access: access for MR
3252  */
3253 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
3254 			  u16 access)
3255 {
3256 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3257 	struct irdma_reg_ns_stag_info *stag_info;
3258 	struct ib_pd *pd = iwmr->ibmr.pd;
3259 	struct irdma_pd *iwpd = to_iwpd(pd);
3260 	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3261 	struct irdma_cqp_request *cqp_request;
3262 	struct cqp_cmds_info *cqp_info;
3263 	int ret;
3264 
3265 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3266 	if (!cqp_request)
3267 		return -ENOMEM;
3268 
3269 	cqp_info = &cqp_request->info;
3270 	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
3271 	stag_info->va = iwpbl->user_base;
3272 	stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
3273 	stag_info->stag_key = (u8)iwmr->stag;
3274 	stag_info->total_len = iwmr->len;
3275 	stag_info->access_rights = irdma_get_mr_access(access,
3276 						       iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
3277 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
3278 		stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3279 	stag_info->pd_id = iwpd->sc_pd.pd_id;
3280 	stag_info->all_memory = iwmr->dma_mr;
3281 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
3282 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
3283 	else
3284 		stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3285 	stag_info->page_size = iwmr->page_size;
3286 
3287 	if (iwpbl->pbl_allocated) {
3288 		if (palloc->level == PBLE_LEVEL_1) {
3289 			stag_info->first_pm_pbl_index = palloc->level1.idx;
3290 			stag_info->chunk_size = 1;
3291 		} else {
3292 			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
3293 			stag_info->chunk_size = 3;
3294 		}
3295 	} else {
3296 		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
3297 	}
3298 
3299 	cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
3300 	cqp_info->post_sq = 1;
3301 	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
3302 	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
3303 	ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3304 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3305 
3306 	if (!ret)
3307 		iwmr->is_hwreg = true;
3308 
3309 	return ret;
3310 }
3311 
3312 static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
3313 				      bool create_stag)
3314 {
3315 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3316 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3317 	u32 stag = 0;
3318 	u8 lvl;
3319 	int err;
3320 
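	/* PBLE_LEVEL_1 | PBLE_LEVEL_2 means "either level is acceptable";
	 * irdma_setup_pbles() settles on the actual level when allocating.
	 * A single-page MR needs no PBL at all (PBLE_LEVEL_0).
	 */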
3321 	lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
3322 
3323 	err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
3324 	if (err)
3325 		return err;
3326 
3327 	if (lvl) {
3328 		err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
3329 						iwmr->page_size);
3330 		if (err) {
3331 			irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
3332 			iwpbl->pbl_allocated = false;
3333 		}
3334 	}
3335 
3336 	if (create_stag) {
3337 		stag = irdma_create_stag(iwdev);
3338 		if (!stag) {
3339 			err = -ENOMEM;
3340 			goto free_pble;
3341 		}
3342 
3343 		iwmr->stag = stag;
3344 		iwmr->ibmr.rkey = stag;
3345 		iwmr->ibmr.lkey = stag;
3346 	}
3347 
3348 	err = irdma_hwreg_mr(iwdev, iwmr, access);
3349 	if (err)
3350 		goto err_hwreg;
3351 
3352 	return 0;
3353 
3354 err_hwreg:
3355 	if (stag)
3356 		irdma_free_stag(iwdev, stag);
3357 
3358 free_pble:
3359 	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
3360 		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
3361 
3362 	return err;
3363 }
3364 
3365 static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
3366 					 struct ib_pd *pd, u64 virt,
3367 					 enum irdma_memreg_type reg_type)
3368 {
3369 	struct irdma_device *iwdev = to_iwdev(pd->device);
3370 	struct irdma_pbl *iwpbl;
3371 	struct irdma_mr *iwmr;
3372 	unsigned long pgsz_bitmap;
3373 
3374 	iwmr = kzalloc_obj(*iwmr);
3375 	if (!iwmr)
3376 		return ERR_PTR(-ENOMEM);
3377 
3378 	iwpbl = &iwmr->iwpbl;
3379 	iwpbl->iwmr = iwmr;
3380 	iwmr->region = region;
3381 	iwmr->ibmr.pd = pd;
3382 	iwmr->ibmr.device = pd->device;
3383 	iwmr->ibmr.iova = virt;
3384 	iwmr->type = reg_type;
3385 
3386 	pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
3387 		iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
3388 
3389 	iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
3390 	if (unlikely(!iwmr->page_size)) {
3391 		kfree(iwmr);
3392 		return ERR_PTR(-EOPNOTSUPP);
3393 	}
3394 
3395 	iwmr->len = region->length;
3396 	iwpbl->user_base = virt;
3397 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
3398 
3399 	return iwmr;
3400 }
3401 
3402 static void irdma_free_iwmr(struct irdma_mr *iwmr)
3403 {
3404 	kfree(iwmr);
3405 }
3406 
3407 static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
3408 				     struct ib_udata *udata,
3409 				     struct irdma_mr *iwmr)
3410 {
3411 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3412 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3413 	struct irdma_ucontext *ucontext = NULL;
3414 	unsigned long flags;
3415 	u32 total;
3416 	int err;
3417 	u8 lvl;
3418 
3419 	/* iWarp: Catch page not starting on OS page boundary */
3420 	if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
3421 	    ib_umem_offset(iwmr->region))
3422 		return -EINVAL;
3423 
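	/* The user's buffer must cover the SQ and RQ rings plus one trailing
	 * shadow-area page, hence the "+ 1" below.
	 */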
3424 	total = req.sq_pages + req.rq_pages + 1;
3425 	if (total > iwmr->page_cnt)
3426 		return -EINVAL;
3427 
3428 	total = req.sq_pages + req.rq_pages;
3429 	lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
3430 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
3431 	if (err)
3432 		return err;
3433 
3434 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
3435 					     ibucontext);
3436 	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
3437 	list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
3438 	iwpbl->on_list = true;
3439 	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
3440 
3441 	return 0;
3442 }
3443 
3444 static int irdma_reg_user_mr_type_srq(struct irdma_mem_reg_req req,
3445 				      struct ib_udata *udata,
3446 				      struct irdma_mr *iwmr)
3447 {
3448 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3449 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3450 	struct irdma_ucontext *ucontext;
3451 	unsigned long flags;
3452 	u32 total;
3453 	int err;
3454 	u8 lvl;
3455 
3456 	total = req.rq_pages + IRDMA_SHADOW_PGCNT;
3457 	if (total > iwmr->page_cnt)
3458 		return -EINVAL;
3459 
3460 	lvl = req.rq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
3461 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
3462 	if (err)
3463 		return err;
3464 
3465 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
3466 					     ibucontext);
3467 	spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
3468 	list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list);
3469 	iwpbl->on_list = true;
3470 	spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
3471 
3472 	return 0;
3473 }
3474 
3475 static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
3476 				     struct ib_udata *udata,
3477 				     struct irdma_mr *iwmr)
3478 {
3479 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3480 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3481 	struct irdma_ucontext *ucontext = NULL;
3482 	u8 shadow_pgcnt = 1;
3483 	unsigned long flags;
3484 	u32 total;
3485 	int err;
3486 	u8 lvl;
3487 
3488 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
3489 		shadow_pgcnt = 0;
3490 	total = req.cq_pages + shadow_pgcnt;
3491 	if (total > iwmr->page_cnt)
3492 		return -EINVAL;
3493 
3494 	lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
3495 	err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
3496 	if (err)
3497 		return err;
3498 
3499 	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
3500 					     ibucontext);
3501 	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3502 	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
3503 	iwpbl->on_list = true;
3504 	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3505 
3506 	return 0;
3507 }
3508 
3509 /**
3510  * irdma_reg_user_mr - Register a user memory region
3511  * @pd: ptr of pd
3512  * @start: virtual start address
3513  * @len: length of mr
3514  * @virt: virtual address
3515  * @access: access of mr
3516  * @dmah: dma handle
3517  * @udata: user data
3518  */
3519 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
3520 				       u64 virt, int access,
3521 				       struct ib_dmah *dmah,
3522 				       struct ib_udata *udata)
3523 {
3524 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
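/* offsetofend() is offsetof() + sizeof() of the member, so this floor
 * requires the incoming udata to cover every field up through sq_pages
 * while still accepting larger request structs from newer providers.
 */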
3525 	struct irdma_device *iwdev = to_iwdev(pd->device);
3526 	struct irdma_mem_reg_req req = {};
3527 	struct ib_umem *region = NULL;
3528 	struct irdma_mr *iwmr = NULL;
3529 	int err;
3530 
3531 	if (dmah)
3532 		return ERR_PTR(-EOPNOTSUPP);
3533 
3534 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3535 		return ERR_PTR(-EINVAL);
3536 
3537 	if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
3538 		return ERR_PTR(-EINVAL);
3539 
3540 	region = ib_umem_get(pd->device, start, len, access);
3541 
3542 	if (IS_ERR(region)) {
3543 		ibdev_dbg(&iwdev->ibdev,
3544 			  "VERBS: Failed to create ib_umem region\n");
3545 		return (struct ib_mr *)region;
3546 	}
3547 
3548 	if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
3549 		ib_umem_release(region);
3550 		return ERR_PTR(-EFAULT);
3551 	}
3552 
3553 	iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
3554 	if (IS_ERR(iwmr)) {
3555 		ib_umem_release(region);
3556 		return (struct ib_mr *)iwmr;
3557 	}
3558 
3559 	switch (req.reg_type) {
3560 	case IRDMA_MEMREG_TYPE_QP:
3561 		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
3562 		if (err)
3563 			goto error;
3564 
3565 		break;
3566 	case IRDMA_MEMREG_TYPE_SRQ:
3567 		err = irdma_reg_user_mr_type_srq(req, udata, iwmr);
3568 		if (err)
3569 			goto error;
3570 
3571 		break;
3572 	case IRDMA_MEMREG_TYPE_CQ:
3573 		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
3574 		if (err)
3575 			goto error;
3576 		break;
3577 	case IRDMA_MEMREG_TYPE_MEM:
3578 		err = irdma_reg_user_mr_type_mem(iwmr, access, true);
3579 		if (err)
3580 			goto error;
3581 
3582 		break;
3583 	default:
3584 		err = -EINVAL;
3585 		goto error;
3586 	}
3587 
3588 	return &iwmr->ibmr;
3589 error:
3590 	ib_umem_release(region);
3591 	irdma_free_iwmr(iwmr);
3592 
3593 	return ERR_PTR(err);
3594 }
3595 
3596 static int irdma_hwdereg_mr(struct ib_mr *ib_mr);
3597 
3598 static void irdma_umem_dmabuf_revoke(void *priv)
3599 {
3600 	/* priv is guaranteed to be valid any time this callback is invoked
3601 	 * because we do not set the callback until after successful iwmr
3602 	 * allocation and initialization.
3603 	 */
3604 	struct irdma_mr *iwmr = priv;
3605 	int err;
3606 
3607 	/* Invalidate the key in hardware. This does not actually release the
3608 	 * key for potential reuse - that only occurs when the region is fully
3609 	 * deregistered.
3610 	 *
3611 	 * The irdma_hwdereg_mr call is a no-op if the region is not currently
3612 	 * registered with hardware.
3613 	 */
3614 	err = irdma_hwdereg_mr(&iwmr->ibmr);
3615 	if (err) {
3616 		struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3617 
3618 		ibdev_err(&iwdev->ibdev, "dmabuf mr revoke failed %d", err);
3619 		if (!iwdev->rf->reset) {
3620 			iwdev->rf->reset = true;
3621 			iwdev->rf->gen_ops.request_reset(iwdev->rf);
3622 		}
3623 	}
3624 }
3625 
3626 static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
3627 					      u64 len, u64 virt,
3628 					      int fd, int access,
3629 					      struct ib_dmah *dmah,
3630 					      struct uverbs_attr_bundle *attrs)
3631 {
3632 	struct irdma_device *iwdev = to_iwdev(pd->device);
3633 	struct ib_umem_dmabuf *umem_dmabuf;
3634 	struct irdma_mr *iwmr;
3635 	int err;
3636 
3637 	if (dmah)
3638 		return ERR_PTR(-EOPNOTSUPP);
3639 
3640 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3641 		return ERR_PTR(-EINVAL);
3642 
3643 	umem_dmabuf =
3644 		ib_umem_dmabuf_get_pinned_revocable_and_lock(pd->device, start,
3645 							     len, fd, access);
3646 	if (IS_ERR(umem_dmabuf)) {
3647 		ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%pe]\n",
3648 			  umem_dmabuf);
3649 		return ERR_CAST(umem_dmabuf);
3650 	}
3651 
3652 	iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
3653 	if (IS_ERR(iwmr)) {
3654 		err = PTR_ERR(iwmr);
3655 		goto err_release;
3656 	}
3657 
3658 	err = irdma_reg_user_mr_type_mem(iwmr, access, true);
3659 	if (err)
3660 		goto err_iwmr;
3661 
3662 	ib_umem_dmabuf_set_revoke_locked(umem_dmabuf, irdma_umem_dmabuf_revoke,
3663 					 iwmr);
3664 	ib_umem_dmabuf_revoke_unlock(umem_dmabuf);
3665 	return &iwmr->ibmr;
3666 
3667 err_iwmr:
3668 	irdma_free_iwmr(iwmr);
3669 
3670 err_release:
3671 	ib_umem_dmabuf_revoke_unlock(umem_dmabuf);
3672 
3673 	/* Will result in a call to revoke, but driver callback is not set and
3674 	 * is therefore skipped.
3675 	 */
3676 	ib_umem_release(&umem_dmabuf->umem);
3677 
3678 	return ERR_PTR(err);
3679 }
3680 
3681 static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
3682 {
3683 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3684 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
3685 	struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
3686 	struct irdma_dealloc_stag_info *info;
3687 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3688 	struct irdma_cqp_request *cqp_request;
3689 	struct cqp_cmds_info *cqp_info;
3690 	int status;
3691 
3692 	/* Skip the HW MR de-register when it was already de-registered
3693 	 * during an MR re-register whose re-registration failed
3694 	 */
3695 	if (!iwmr->is_hwreg)
3696 		return 0;
3697 
3698 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3699 	if (!cqp_request)
3700 		return -ENOMEM;
3701 
3702 	cqp_info = &cqp_request->info;
3703 	info = &cqp_info->in.u.dealloc_stag.info;
3704 	info->pd_id = iwpd->sc_pd.pd_id;
3705 	info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3706 	info->mr = true;
3707 	if (iwpbl->pbl_allocated)
3708 		info->dealloc_pbl = true;
3709 
3710 	cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3711 	cqp_info->post_sq = 1;
3712 	cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3713 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3714 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3715 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3716 	if (status)
3717 		return status;
3718 
3719 	iwmr->is_hwreg = false;
3720 	return 0;
3721 }
3722 
3723 /*
3724  * irdma_rereg_mr_trans - Re-register a user MR when the translation changes.
3725  * @iwmr: ptr of iwmr
3726  * @start: virtual start address
3727  * @len: length of mr
3728  * @virt: virtual address
3729  *
3730  * Re-register a user memory region when a change of translation is requested.
3731  * The new region is registered while reusing the stag from the original registration.
3732  */
3733 static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
3734 				u64 virt)
3735 {
3736 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3737 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3738 	struct ib_pd *pd = iwmr->ibmr.pd;
3739 	struct ib_umem *region;
3740 	int err;
3741 
3742 	region = ib_umem_get(pd->device, start, len, iwmr->access);
3743 	if (IS_ERR(region))
3744 		return PTR_ERR(region);
3745 
3746 	iwmr->region = region;
3747 	iwmr->ibmr.iova = virt;
3748 	iwmr->ibmr.pd = pd;
3749 	iwmr->page_size = ib_umem_find_best_pgsz(region,
3750 				iwdev->rf->sc_dev.hw_attrs.page_size_cap,
3751 				virt);
3752 	if (unlikely(!iwmr->page_size)) {
3753 		err = -EOPNOTSUPP;
3754 		goto err;
3755 	}
3756 
3757 	iwmr->len = region->length;
3758 	iwpbl->user_base = virt;
3759 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
3760 
3761 	err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false);
3762 	if (err)
3763 		goto err;
3764 
3765 	return 0;
3766 
3767 err:
3768 	ib_umem_release(region);
3769 	iwmr->region = NULL;
3770 	return err;
3771 }
3772 
3773 /*
3774  *  irdma_rereg_user_mr - Re-register a user memory region (MR)
3775  *  @ibmr: ib mem to access iwarp mr pointer
3776  *  @flags: bit mask indicating which MR attributes are being modified
3777  *  @start: virtual start address
3778  *  @len: length of mr
3779  *  @virt: virtual address
3780  *  @new_access: bit mask of access flags
3781  *  @new_pd: ptr of pd
3782  *  @udata: user data
3783  *
3784  *  Return:
3785  *  NULL - Success, existing MR updated
3786  *  ERR_PTR - error occurred
3787  */
3788 static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags,
3789 					 u64 start, u64 len, u64 virt,
3790 					 int new_access, struct ib_pd *new_pd,
3791 					 struct ib_udata *udata)
3792 {
3793 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3794 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
3795 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3796 	bool dmabuf_revocable = iwmr->region && iwmr->region->is_dmabuf;
3797 	struct ib_umem_dmabuf *umem_dmabuf;
3798 	int ret;
3799 
3800 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3801 		return ERR_PTR(-EINVAL);
3802 
3803 	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
3804 		return ERR_PTR(-EOPNOTSUPP);
3805 
3806 	if (dmabuf_revocable) {
3807 		umem_dmabuf = to_ib_umem_dmabuf(iwmr->region);
3808 
3809 		ib_umem_dmabuf_revoke_lock(umem_dmabuf);
3810 
3811 		/* If the dmabuf has been revoked, it means that the region has
3812 		 * been invalidated in HW. We must not allow it to become valid
3813 		 * again unless the user is requesting a change in translation
3814 		 * which will end up dropping the umem dmabuf and allocating an
3815 		 * entirely new umem anyway.
3816 		 */
3817 		if (umem_dmabuf->revoked && !(flags & IB_MR_REREG_TRANS)) {
3818 			ret = -EINVAL;
3819 			goto err_unlock;
3820 		}
3821 	}
3822 
3823 	ret = irdma_hwdereg_mr(ib_mr);
3824 	if (ret)
3825 		goto err_unlock;
3826 
3827 	if (flags & IB_MR_REREG_ACCESS)
3828 		iwmr->access = new_access;
3829 
3830 	if (flags & IB_MR_REREG_PD) {
3831 		iwmr->ibmr.pd = new_pd;
3832 		iwmr->ibmr.device = new_pd->device;
3833 	}
3834 
3835 	if (flags & IB_MR_REREG_TRANS) {
3836 		if (iwpbl->pbl_allocated) {
3837 			irdma_free_pble(iwdev->rf->pble_rsrc,
3838 					&iwpbl->pble_alloc);
3839 			iwpbl->pbl_allocated = false;
3840 		}
3841 
3842 		if (dmabuf_revocable) {
3843 			/* Must unlock before release to prevent deadlock */
3844 			ib_umem_dmabuf_revoke_unlock(umem_dmabuf);
3845 			dmabuf_revocable = false;
3846 		}
3847 
3848 		if (iwmr->region) {
3849 			ib_umem_release(iwmr->region);
3850 			iwmr->region = NULL;
3851 		}
3852 
3853 		ret = irdma_rereg_mr_trans(iwmr, start, len, virt);
3854 	} else {
3855 		ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
3856 	}
3857 
3858 err_unlock:
3859 	if (dmabuf_revocable)
3860 		ib_umem_dmabuf_revoke_unlock(umem_dmabuf);
3861 
3862 	return ret ? ERR_PTR(ret) : NULL;
3863 }
3864 
3865 /**
3866  * irdma_reg_phys_mr - register kernel physical memory
3867  * @pd: ibpd pointer
3868  * @addr: physical address of memory to register
3869  * @size: size of memory to register
3870  * @access: Access rights
3871  * @iova_start: start of virtual address for physical buffers
3872  * @dma_mr: Flag indicating whether this region is a PD DMA MR
3873  */
3874 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
3875 				u64 *iova_start, bool dma_mr)
3876 {
3877 	struct irdma_device *iwdev = to_iwdev(pd->device);
3878 	struct irdma_pbl *iwpbl;
3879 	struct irdma_mr *iwmr;
3880 	u32 stag;
3881 	int ret;
3882 
3883 	iwmr = kzalloc_obj(*iwmr);
3884 	if (!iwmr)
3885 		return ERR_PTR(-ENOMEM);
3886 
3887 	iwmr->ibmr.pd = pd;
3888 	iwmr->ibmr.device = pd->device;
3889 	iwpbl = &iwmr->iwpbl;
3890 	iwpbl->iwmr = iwmr;
3891 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
3892 	iwmr->dma_mr = dma_mr;
3893 	iwpbl->user_base = *iova_start;
3894 	stag = irdma_create_stag(iwdev);
3895 	if (!stag) {
3896 		ret = -ENOMEM;
3897 		goto err;
3898 	}
3899 
3900 	iwmr->stag = stag;
3901 	iwmr->ibmr.iova = *iova_start;
3902 	iwmr->ibmr.rkey = stag;
3903 	iwmr->ibmr.lkey = stag;
3904 	iwmr->page_cnt = 1;
3905 	iwmr->pgaddrmem[0] = addr;
3906 	iwmr->len = size;
3907 	iwmr->page_size = SZ_4K;
3908 	ret = irdma_hwreg_mr(iwdev, iwmr, access);
3909 	if (ret) {
3910 		irdma_free_stag(iwdev, stag);
3911 		goto err;
3912 	}
3913 
3914 	return &iwmr->ibmr;
3915 
3916 err:
3917 	kfree(iwmr);
3918 
3919 	return ERR_PTR(ret);
3920 }
3921 
3922 /**
3923  * irdma_get_dma_mr - register physical mem
3924  * @pd: ptr of pd
3925  * @acc: access for memory
3926  */
3927 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
3928 {
3929 	u64 kva = 0;
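	/* A DMA MR spans all of host memory: addr/size of 0 with dma_mr set
	 * makes irdma_hwreg_mr() mark the stag as all_memory, while kva = 0
	 * anchors the mapping at iova 0.
	 */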
3930 
3931 	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva, true);
3932 }
3933 
3934 /**
3935  * irdma_del_memlist - Delete pbl list entries for CQ/QP/SRQ
3936  * @iwmr: iwmr for IB's user page addresses
3937  * @ucontext: ptr to user context
3938  */
3939 static void irdma_del_memlist(struct irdma_mr *iwmr,
3940 			      struct irdma_ucontext *ucontext)
3941 {
3942 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3943 	unsigned long flags;
3944 
3945 	switch (iwmr->type) {
3946 	case IRDMA_MEMREG_TYPE_CQ:
3947 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3948 		if (iwpbl->on_list) {
3949 			iwpbl->on_list = false;
3950 			list_del(&iwpbl->list);
3951 		}
3952 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3953 		break;
3954 	case IRDMA_MEMREG_TYPE_QP:
3955 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
3956 		if (iwpbl->on_list) {
3957 			iwpbl->on_list = false;
3958 			list_del(&iwpbl->list);
3959 		}
3960 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
3961 		break;
3962 	case IRDMA_MEMREG_TYPE_SRQ:
3963 		spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
3964 		if (iwpbl->on_list) {
3965 			iwpbl->on_list = false;
3966 			list_del(&iwpbl->list);
3967 		}
3968 		spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
3969 		break;
3970 	default:
3971 		break;
3972 	}
3973 }
3974 
3975 /**
3976  * irdma_dereg_mr - deregister mr
3977  * @ib_mr: mr ptr for dereg
3978  * @udata: user data
3979  */
3980 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3981 {
3982 	struct irdma_mr *iwmr = to_iwmr(ib_mr);
3983 	struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3984 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3985 	bool dmabuf_revocable = iwmr->region && iwmr->region->is_dmabuf;
3986 	int ret;
3987 
3988 	if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3989 		if (iwmr->region) {
3990 			struct irdma_ucontext *ucontext;
3991 
3992 			ucontext = rdma_udata_to_drv_context(udata,
3993 						struct irdma_ucontext,
3994 						ibucontext);
3995 			irdma_del_memlist(iwmr, ucontext);
3996 		}
3997 		goto done;
3998 	}
3999 
4000 	if (!dmabuf_revocable) {
4001 		ret = irdma_hwdereg_mr(ib_mr);
4002 		if (ret)
4003 			return ret;
4004 
4005 		irdma_free_stag(iwdev, iwmr->stag);
4006 	}
4007 done:
4008 	if (iwmr->region)
4009 		/* For dmabuf MRs, ib_umem_release will trigger a synchronous
4010 		 * call to the revoke callback which will perform the actual HW
4011 		 * invalidation via irdma_hwdereg_mr. We rely on this for its
4012 		 * implicit serialization w.r.t. concurrent revocations. This
4013 		 * must be done before freeing the PBLEs.
4014 		 */
4015 		ib_umem_release(iwmr->region);
4016 
4017 	if (iwpbl->pbl_allocated)
4018 		irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
4019 
4020 	if (dmabuf_revocable)
4021 		irdma_free_stag(iwdev, iwmr->stag);
4022 
4023 	kfree(iwmr);
4024 
4025 	return 0;
4026 }
4027 
4028 /**
4029  * irdma_post_send - post send wr for kernel application
4030  * @ibqp: qp ptr for wr
4031  * @ib_wr: work request ptr
4032  * @bad_wr: returns the bad wr on error
4033  */
4034 static int irdma_post_send(struct ib_qp *ibqp,
4035 			   const struct ib_send_wr *ib_wr,
4036 			   const struct ib_send_wr **bad_wr)
4037 {
4038 	struct irdma_qp *iwqp;
4039 	struct irdma_qp_uk *ukqp;
4040 	struct irdma_sc_dev *dev;
4041 	struct irdma_post_sq_info info;
4042 	int err = 0;
4043 	unsigned long flags;
4044 	bool inv_stag;
4045 	struct irdma_ah *ah;
4046 
4047 	iwqp = to_iwqp(ibqp);
4048 	ukqp = &iwqp->sc_qp.qp_uk;
4049 	dev = &iwqp->iwdev->rf->sc_dev;
4050 
4051 	spin_lock_irqsave(&iwqp->lock, flags);
4052 	while (ib_wr) {
4053 		memset(&info, 0, sizeof(info));
4054 		inv_stag = false;
4055 		info.wr_id = (ib_wr->wr_id);
4056 		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
4057 			info.signaled = true;
4058 		if (ib_wr->send_flags & IB_SEND_FENCE)
4059 			info.read_fence = true;
4060 		switch (ib_wr->opcode) {
4061 		case IB_WR_ATOMIC_CMP_AND_SWP:
4062 			if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
4063 				       IRDMA_FEATURE_ATOMIC_OPS))) {
4064 				err = -EINVAL;
4065 				break;
4066 			}
4067 			info.op_type = IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP;
4068 			info.op.atomic_compare_swap.tagged_offset = ib_wr->sg_list[0].addr;
4069 			info.op.atomic_compare_swap.remote_tagged_offset =
4070 				atomic_wr(ib_wr)->remote_addr;
4071 			info.op.atomic_compare_swap.swap_data_bytes = atomic_wr(ib_wr)->swap;
4072 			info.op.atomic_compare_swap.compare_data_bytes =
4073 				atomic_wr(ib_wr)->compare_add;
4074 			info.op.atomic_compare_swap.stag = ib_wr->sg_list[0].lkey;
4075 			info.op.atomic_compare_swap.remote_stag = atomic_wr(ib_wr)->rkey;
4076 			err = irdma_uk_atomic_compare_swap(ukqp, &info, false);
4077 			break;
4078 		case IB_WR_ATOMIC_FETCH_AND_ADD:
4079 			if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
4080 				       IRDMA_FEATURE_ATOMIC_OPS))) {
4081 				err = -EINVAL;
4082 				break;
4083 			}
4084 			info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
4085 			info.op.atomic_fetch_add.tagged_offset = ib_wr->sg_list[0].addr;
4086 			info.op.atomic_fetch_add.remote_tagged_offset =
4087 				atomic_wr(ib_wr)->remote_addr;
4088 			info.op.atomic_fetch_add.fetch_add_data_bytes =
4089 				atomic_wr(ib_wr)->compare_add;
4090 			info.op.atomic_fetch_add.stag = ib_wr->sg_list[0].lkey;
4091 			info.op.atomic_fetch_add.remote_stag =
4092 				atomic_wr(ib_wr)->rkey;
4093 			err = irdma_uk_atomic_fetch_add(ukqp, &info, false);
4094 			break;
4095 		case IB_WR_SEND_WITH_IMM:
4096 			if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
4097 				info.imm_data_valid = true;
4098 				info.imm_data = ntohl(ib_wr->ex.imm_data);
4099 			} else {
4100 				err = -EINVAL;
4101 				break;
4102 			}
4103 			fallthrough;
4104 		case IB_WR_SEND:
4105 		case IB_WR_SEND_WITH_INV:
4106 			if (ib_wr->opcode == IB_WR_SEND ||
4107 			    ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
4108 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
4109 					info.op_type = IRDMA_OP_TYPE_SEND_SOL;
4110 				else
4111 					info.op_type = IRDMA_OP_TYPE_SEND;
4112 			} else {
4113 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
4114 					info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
4115 				else
4116 					info.op_type = IRDMA_OP_TYPE_SEND_INV;
4117 				info.stag_to_inv = ib_wr->ex.invalidate_rkey;
4118 			}
4119 
4120 			info.op.send.num_sges = ib_wr->num_sge;
4121 			info.op.send.sg_list = ib_wr->sg_list;
4122 			if (iwqp->ibqp.qp_type == IB_QPT_UD ||
4123 			    iwqp->ibqp.qp_type == IB_QPT_GSI) {
4124 				ah = to_iwah(ud_wr(ib_wr)->ah);
4125 				info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
4126 				info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
4127 				info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
4128 			}
4129 
4130 			if (ib_wr->send_flags & IB_SEND_INLINE)
4131 				err = irdma_uk_inline_send(ukqp, &info, false);
4132 			else
4133 				err = irdma_uk_send(ukqp, &info, false);
4134 			break;
4135 		case IB_WR_RDMA_WRITE_WITH_IMM:
4136 			if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
4137 				info.imm_data_valid = true;
4138 				info.imm_data = ntohl(ib_wr->ex.imm_data);
4139 			} else {
4140 				err = -EINVAL;
4141 				break;
4142 			}
4143 			fallthrough;
4144 		case IB_WR_RDMA_WRITE:
4145 			if (ib_wr->send_flags & IB_SEND_SOLICITED)
4146 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
4147 			else
4148 				info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
4149 
4150 			info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
4151 			info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
4152 			info.op.rdma_write.rem_addr.addr =
4153 				rdma_wr(ib_wr)->remote_addr;
4154 			info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
4155 			if (ib_wr->send_flags & IB_SEND_INLINE)
4156 				err = irdma_uk_inline_rdma_write(ukqp, &info, false);
4157 			else
4158 				err = irdma_uk_rdma_write(ukqp, &info, false);
4159 			break;
4160 		case IB_WR_RDMA_READ_WITH_INV:
4161 			inv_stag = true;
4162 			fallthrough;
4163 		case IB_WR_RDMA_READ:
4164 			if (ib_wr->num_sge >
4165 			    dev->hw_attrs.uk_attrs.max_hw_read_sges) {
4166 				err = -EINVAL;
4167 				break;
4168 			}
4169 			info.op_type = IRDMA_OP_TYPE_RDMA_READ;
4170 			info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
4171 			info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
4172 			info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
4173 			info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
4174 			err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
4175 			break;
4176 		case IB_WR_LOCAL_INV:
4177 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
4178 			info.local_fence = true;
4179 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
4180 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
4181 			break;
4182 		case IB_WR_REG_MR: {
4183 			struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
4184 			struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
4185 			struct irdma_fast_reg_stag_info stag_info = {};
4186 
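			/* An irdma STag packs the 24-bit resource index into
			 * the upper bits with an 8-bit consumer key in the
			 * low byte, hence the >> 8 / & 0xff split below.
			 */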
4187 			stag_info.signaled = info.signaled;
4188 			stag_info.read_fence = info.read_fence;
4189 			stag_info.access_rights =
4190 				irdma_get_mr_access(reg_wr(ib_wr)->access,
4191 						    dev->hw_attrs.uk_attrs.hw_rev);
4192 			stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
4193 			stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
4194 			stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
4195 			stag_info.wr_id = ib_wr->wr_id;
4196 			stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
4197 			stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
4198 			stag_info.total_len = iwmr->ibmr.length;
4199 			stag_info.reg_addr_pa = *palloc->level1.addr;
4200 			stag_info.first_pm_pbl_index = palloc->level1.idx;
4201 			stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
4202 			if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
4203 				stag_info.chunk_size = 1;
4204 			err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
4205 							true);
4206 			break;
4207 		}
4208 		default:
4209 			err = -EINVAL;
4210 			ibdev_dbg(&iwqp->iwdev->ibdev,
4211 				  "VERBS: upost_send bad opcode = 0x%x\n",
4212 				  ib_wr->opcode);
4213 			break;
4214 		}
4215 
4216 		if (err)
4217 			break;
4218 		ib_wr = ib_wr->next;
4219 	}
4220 
4221 	if (!iwqp->flush_issued) {
4222 		if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
4223 			irdma_uk_qp_post_wr(ukqp);
4224 		spin_unlock_irqrestore(&iwqp->lock, flags);
4225 	} else {
4226 		spin_unlock_irqrestore(&iwqp->lock, flags);
4227 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
4228 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
4229 	}
4230 
4231 	if (err)
4232 		*bad_wr = ib_wr;
4233 
4234 	return err;
4235 }
4236 
4237 /**
4238  * irdma_post_srq_recv - post receive wr for kernel application
4239  * @ibsrq: ib srq pointer
4240  * @ib_wr: work request for receive
4241  * @bad_wr: returns the wr that caused an error
4242  */
4243 static int irdma_post_srq_recv(struct ib_srq *ibsrq,
4244 			       const struct ib_recv_wr *ib_wr,
4245 			       const struct ib_recv_wr **bad_wr)
4246 {
4247 	struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
4248 	struct irdma_srq_uk *uksrq = &iwsrq->sc_srq.srq_uk;
4249 	struct irdma_post_rq_info post_recv = {};
4250 	unsigned long flags;
4251 	int err = 0;
4252 
4253 	spin_lock_irqsave(&iwsrq->lock, flags);
4254 	while (ib_wr) {
4255 		if (ib_wr->num_sge > uksrq->max_srq_frag_cnt) {
4256 			err = -EINVAL;
4257 			goto out;
4258 		}
4259 		post_recv.num_sges = ib_wr->num_sge;
4260 		post_recv.wr_id = ib_wr->wr_id;
4261 		post_recv.sg_list = ib_wr->sg_list;
4262 		err = irdma_uk_srq_post_receive(uksrq, &post_recv);
4263 		if (err)
4264 			goto out;
4265 
4266 		ib_wr = ib_wr->next;
4267 	}
4268 
4269 out:
4270 	spin_unlock_irqrestore(&iwsrq->lock, flags);
4271 
4272 	if (err)
4273 		*bad_wr = ib_wr;
4274 
4275 	return err;
4276 }
4277 
4278 /**
4279  * irdma_post_recv - post receive wr for kernel application
4280  * @ibqp: ib qp pointer
4281  * @ib_wr: work request for receive
4282  * @bad_wr: returns the wr that caused an error
4283  */
4284 static int irdma_post_recv(struct ib_qp *ibqp,
4285 			   const struct ib_recv_wr *ib_wr,
4286 			   const struct ib_recv_wr **bad_wr)
4287 {
4288 	struct irdma_qp *iwqp;
4289 	struct irdma_qp_uk *ukqp;
4290 	struct irdma_post_rq_info post_recv = {};
4291 	unsigned long flags;
4292 	int err = 0;
4293 
4294 	iwqp = to_iwqp(ibqp);
4295 	ukqp = &iwqp->sc_qp.qp_uk;
4296 
4297 	if (ukqp->srq_uk) {
4298 		*bad_wr = ib_wr;
4299 		return -EINVAL;
4300 	}
4301 
4302 	spin_lock_irqsave(&iwqp->lock, flags);
4303 	while (ib_wr) {
4304 		post_recv.num_sges = ib_wr->num_sge;
4305 		post_recv.wr_id = ib_wr->wr_id;
4306 		post_recv.sg_list = ib_wr->sg_list;
4307 		err = irdma_uk_post_receive(ukqp, &post_recv);
4308 		if (err) {
4309 			ibdev_dbg(&iwqp->iwdev->ibdev,
4310 				  "VERBS: post_recv err %d\n", err);
4311 			goto out;
4312 		}
4313 
4314 		ib_wr = ib_wr->next;
4315 	}
4316 
4317 out:
4318 	spin_unlock_irqrestore(&iwqp->lock, flags);
4319 	if (iwqp->flush_issued)
4320 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
4321 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
4322 
4323 	if (err)
4324 		*bad_wr = ib_wr;
4325 
4326 	return err;
4327 }
4328 
4329 /**
4330  * irdma_flush_err_to_ib_wc_status - convert flush error code to IB WC status
4331  * @opcode: iwarp flush code
4332  */
4333 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
4334 {
4335 	switch (opcode) {
4336 	case FLUSH_PROT_ERR:
4337 		return IB_WC_LOC_PROT_ERR;
4338 	case FLUSH_REM_ACCESS_ERR:
4339 		return IB_WC_REM_ACCESS_ERR;
4340 	case FLUSH_LOC_QP_OP_ERR:
4341 		return IB_WC_LOC_QP_OP_ERR;
4342 	case FLUSH_REM_OP_ERR:
4343 		return IB_WC_REM_OP_ERR;
4344 	case FLUSH_LOC_LEN_ERR:
4345 		return IB_WC_LOC_LEN_ERR;
4346 	case FLUSH_GENERAL_ERR:
4347 		return IB_WC_WR_FLUSH_ERR;
4348 	case FLUSH_RETRY_EXC_ERR:
4349 		return IB_WC_RETRY_EXC_ERR;
4350 	case FLUSH_MW_BIND_ERR:
4351 		return IB_WC_MW_BIND_ERR;
4352 	case FLUSH_REM_INV_REQ_ERR:
4353 		return IB_WC_REM_INV_REQ_ERR;
4354 	case FLUSH_RNR_RETRY_EXC_ERR:
4355 		return IB_WC_RNR_RETRY_EXC_ERR;
4356 	case FLUSH_FATAL_ERR:
4357 	default:
4358 		return IB_WC_FATAL_ERR;
4359 	}
4360 }
4361 
4362 /**
4363  * irdma_process_cqe - process cqe info
4364  * @entry: processed cqe
4365  * @cq_poll_info: cqe info
4366  */
4367 static void irdma_process_cqe(struct ib_wc *entry,
4368 			      struct irdma_cq_poll_info *cq_poll_info)
4369 {
4370 	struct irdma_sc_qp *qp;
4371 
4372 	entry->wc_flags = 0;
4373 	entry->pkey_index = 0;
4374 	entry->wr_id = cq_poll_info->wr_id;
4375 
4376 	qp = cq_poll_info->qp_handle;
4377 	entry->qp = qp->qp_uk.back_qp;
4378 
4379 	if (cq_poll_info->error) {
4380 		entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
4381 				irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
4382 
4383 		entry->vendor_err = cq_poll_info->major_err << 16 |
4384 				    cq_poll_info->minor_err;
4385 	} else {
4386 		entry->status = IB_WC_SUCCESS;
4387 		if (cq_poll_info->imm_valid) {
4388 			entry->ex.imm_data = htonl(cq_poll_info->imm_data);
4389 			entry->wc_flags |= IB_WC_WITH_IMM;
4390 		}
4391 		if (cq_poll_info->ud_smac_valid) {
4392 			ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
4393 			entry->wc_flags |= IB_WC_WITH_SMAC;
4394 		}
4395 
4396 		if (cq_poll_info->ud_vlan_valid) {
4397 			u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
4398 
4399 			entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
4400 			if (vlan) {
4401 				entry->vlan_id = vlan;
4402 				entry->wc_flags |= IB_WC_WITH_VLAN;
4403 			}
4404 		} else {
4405 			entry->sl = 0;
4406 		}
4407 	}
4408 
4409 	if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
4410 		set_ib_wc_op_sq(cq_poll_info, entry);
4411 	} else {
4412 		if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
4413 			set_ib_wc_op_rq(cq_poll_info, entry,
4414 					qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
4415 					true : false);
4416 		else
4417 			set_ib_wc_op_rq_gen_3(cq_poll_info, entry);
4418 		if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
4419 		    cq_poll_info->stag_invalid_set) {
4420 			entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
4421 			entry->wc_flags |= IB_WC_WITH_INVALIDATE;
4422 		}
4423 	}
4424 
4425 	if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
4426 		entry->src_qp = cq_poll_info->ud_src_qpn;
4427 		entry->slid = 0;
4428 		entry->wc_flags |=
4429 			(IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
4430 		entry->network_hdr_type = cq_poll_info->ipv4 ?
4431 						  RDMA_NETWORK_IPV4 :
4432 						  RDMA_NETWORK_IPV6;
4433 	} else {
4434 		entry->src_qp = cq_poll_info->qp_id;
4435 	}
4436 
4437 	entry->byte_len = cq_poll_info->bytes_xfered;
4438 }
4439 
4440 /**
4441  * irdma_poll_one - poll one entry of the CQ
4442  * @ukcq: ukcq to poll
4443  * @cur_cqe: current CQE info to be filled in
4444  * @entry: ib_wc object to be filled for a non-extended CQ, or NULL for an extended CQ
4445  *
4446  * Returns the internal irdma device error code or 0 on success
4447  */
4448 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
4449 				 struct irdma_cq_poll_info *cur_cqe,
4450 				 struct ib_wc *entry)
4451 {
4452 	int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
4453 
4454 	if (ret)
4455 		return ret;
4456 
4457 	irdma_process_cqe(entry, cur_cqe);
4458 
4459 	return 0;
4460 }
4461 
4462 /**
4463  * __irdma_poll_cq - poll cq for completion (kernel apps)
4464  * @iwcq: cq to poll
4465  * @num_entries: number of entries to poll
4466  * @entry: array of ib_wc entries to fill with completions
4467  */
4468 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
4469 {
4470 	struct list_head *tmp_node, *list_node;
4471 	struct irdma_cq_buf *last_buf = NULL;
4472 	struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
4473 	struct irdma_cq_buf *cq_buf;
4474 	int ret;
4475 	struct irdma_device *iwdev;
4476 	struct irdma_cq_uk *ukcq;
4477 	bool cq_new_cqe = false;
4478 	int resized_bufs = 0;
4479 	int npolled = 0;
4480 
4481 	iwdev = to_iwdev(iwcq->ibcq.device);
4482 	ukcq = &iwcq->sc_cq.cq_uk;
4483 
4484 	/* go through the list of previously resized CQ buffers */
4485 	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
4486 		cq_buf = container_of(list_node, struct irdma_cq_buf, list);
4487 		while (npolled < num_entries) {
4488 			ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
4489 			if (!ret) {
4490 				++npolled;
4491 				cq_new_cqe = true;
4492 				continue;
4493 			}
4494 			if (ret == -ENOENT)
4495 				break;
4496 			 /* QP using the CQ is destroyed. Skip reporting this CQE */
4497 			if (ret == -EFAULT) {
4498 				cq_new_cqe = true;
4499 				continue;
4500 			}
4501 			goto error;
4502 		}
4503 
4504 		/* save the resized CQ buffer which received the last cqe */
4505 		if (cq_new_cqe)
4506 			last_buf = cq_buf;
4507 		cq_new_cqe = false;
4508 	}
4509 
4510 	/* check the current CQ for new cqes */
4511 	while (npolled < num_entries) {
4512 		ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
4513 		if (ret == -ENOENT) {
4514 			ret = irdma_generated_cmpls(iwcq, cur_cqe);
4515 			if (!ret)
4516 				irdma_process_cqe(entry + npolled, cur_cqe);
4517 		}
4518 		if (!ret) {
4519 			++npolled;
4520 			cq_new_cqe = true;
4521 			continue;
4522 		}
4523 
4524 		if (ret == -ENOENT)
4525 			break;
4526 		/* QP using the CQ is destroyed. Skip reporting this CQE */
4527 		if (ret == -EFAULT) {
4528 			cq_new_cqe = true;
4529 			continue;
4530 		}
4531 		goto error;
4532 	}
4533 
4534 	if (cq_new_cqe)
4535 		/* all previous CQ resizes are complete */
4536 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
4537 	else if (last_buf)
4538 		/* only CQ resizes up to the last_buf are complete */
4539 		resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
4540 	if (resized_bufs)
4541 		/* report to the HW the number of complete CQ resizes */
4542 		irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
4543 
4544 	return npolled;
4545 error:
4546 	ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
4547 		  __func__, ret);
4548 
4549 	return ret;
4550 }
4551 
4552 /**
4553  * irdma_poll_cq - poll cq for completion (kernel apps)
4554  * @ibcq: cq to poll
4555  * @num_entries: number of entries to poll
4556  * @entry: array of ib_wc entries to fill with completions
4557  */
4558 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
4559 			 struct ib_wc *entry)
4560 {
4561 	struct irdma_cq *iwcq;
4562 	unsigned long flags;
4563 	int ret;
4564 
4565 	iwcq = to_iwcq(ibcq);
4566 
4567 	spin_lock_irqsave(&iwcq->lock, flags);
4568 	ret = __irdma_poll_cq(iwcq, num_entries, entry);
4569 	spin_unlock_irqrestore(&iwcq->lock, flags);
4570 
4571 	return ret;
4572 }
4573 
4574 /**
4575  * irdma_req_notify_cq - arm the cq for a kernel application
4576  * @ibcq: cq to arm
4577  * @notify_flags: notification flags
4578  */
4579 static int irdma_req_notify_cq(struct ib_cq *ibcq,
4580 			       enum ib_cq_notify_flags notify_flags)
4581 {
4582 	struct irdma_cq *iwcq;
4583 	struct irdma_cq_uk *ukcq;
4584 	unsigned long flags;
4585 	enum irdma_cmpl_notify cq_notify;
4586 	bool promo_event = false;
4587 	int ret = 0;
4588 
4589 	cq_notify = notify_flags == IB_CQ_SOLICITED ?
4590 		    IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
4591 	iwcq = to_iwcq(ibcq);
4592 	ukcq = &iwcq->sc_cq.cq_uk;
4593 
4594 	spin_lock_irqsave(&iwcq->lock, flags);
4595 	/* Only promote to arm the CQ for any event if the last arm event was solicited. */
4596 	if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
4597 		promo_event = true;
4598 
4599 	if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
4600 		iwcq->last_notify = cq_notify;
4601 		irdma_uk_cq_request_notification(ukcq, cq_notify);
4602 	}
4603 
4604 	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
4605 	    (!irdma_uk_cq_empty(ukcq) || !list_empty(&iwcq->cmpl_generated)))
4606 		ret = 1;
4607 	spin_unlock_irqrestore(&iwcq->lock, flags);
4608 
4609 	return ret;
4610 }
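
/* Illustrative consumer-side idiom (not part of this driver): a return of 1
 * with IB_CQ_REPORT_MISSED_EVENTS set means completions may already be
 * pending, so the caller should re-poll instead of sleeping, e.g.:
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *
 * process_wc() above is a hypothetical consumer handler.
 */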
4611 
4612 static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
4613 	/* gen1 - 32-bit */
4614 	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name		= "ip4InDiscards",
4615 	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name		= "ip4InTruncatedPkts",
4616 	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name		= "ip4OutNoRoutes",
4617 	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name		= "ip6InDiscards",
4618 	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name		= "ip6InTruncatedPkts",
4619 	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name		= "ip6OutNoRoutes",
4620 	[IRDMA_HW_STAT_INDEX_RXVLANERR].name		= "rxVlanErrors",
4621 	/* gen1 - 64-bit */
4622 	[IRDMA_HW_STAT_INDEX_IP4RXOCTS].name		= "ip4InOctets",
4623 	[IRDMA_HW_STAT_INDEX_IP4RXPKTS].name		= "ip4InPkts",
4624 	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name		= "ip4InReasmRqd",
4625 	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name		= "ip4InMcastPkts",
4626 	[IRDMA_HW_STAT_INDEX_IP4TXOCTS].name		= "ip4OutOctets",
4627 	[IRDMA_HW_STAT_INDEX_IP4TXPKTS].name		= "ip4OutPkts",
4628 	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name		= "ip4OutSegRqd",
4629 	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name		= "ip4OutMcastPkts",
4630 	[IRDMA_HW_STAT_INDEX_IP6RXOCTS].name		= "ip6InOctets",
4631 	[IRDMA_HW_STAT_INDEX_IP6RXPKTS].name		= "ip6InPkts",
4632 	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name		= "ip6InReasmRqd",
4633 	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name		= "ip6InMcastPkts",
4634 	[IRDMA_HW_STAT_INDEX_IP6TXOCTS].name		= "ip6OutOctets",
4635 	[IRDMA_HW_STAT_INDEX_IP6TXPKTS].name		= "ip6OutPkts",
4636 	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name		= "ip6OutSegRqd",
4637 	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name		= "ip6OutMcastPkts",
4638 	[IRDMA_HW_STAT_INDEX_RDMARXRDS].name		= "InRdmaReads",
4639 	[IRDMA_HW_STAT_INDEX_RDMARXSNDS].name		= "InRdmaSends",
4640 	[IRDMA_HW_STAT_INDEX_RDMARXWRS].name		= "InRdmaWrites",
4641 	[IRDMA_HW_STAT_INDEX_RDMATXRDS].name		= "OutRdmaReads",
4642 	[IRDMA_HW_STAT_INDEX_RDMATXSNDS].name		= "OutRdmaSends",
4643 	[IRDMA_HW_STAT_INDEX_RDMATXWRS].name		= "OutRdmaWrites",
4644 	[IRDMA_HW_STAT_INDEX_RDMAVBND].name		= "RdmaBnd",
4645 	[IRDMA_HW_STAT_INDEX_RDMAVINV].name		= "RdmaInv",
4646 
4647 	/* gen2 - 32-bit */
4648 	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name	= "cnpHandled",
4649 	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name	= "cnpIgnored",
4650 	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name		= "cnpSent",
4651 	/* gen2 - 64-bit */
4652 	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name		= "ip4InMcastOctets",
4653 	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name		= "ip4OutMcastOctets",
4654 	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name		= "ip6InMcastOctets",
4655 	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name		= "ip6OutMcastOctets",
4656 	[IRDMA_HW_STAT_INDEX_UDPRXPKTS].name		= "RxUDP",
4657 	[IRDMA_HW_STAT_INDEX_UDPTXPKTS].name		= "TxUDP",
4658 	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name	= "RxECNMrkd",
4659 	[IRDMA_HW_STAT_INDEX_TCPRTXSEG].name		= "RetransSegs",
4660 	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name		= "InOptErrors",
4661 	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name	= "InProtoErrors",
4662 	[IRDMA_HW_STAT_INDEX_TCPRXSEGS].name		= "InSegs",
4663 	[IRDMA_HW_STAT_INDEX_TCPTXSEG].name		= "OutSegs",
4664 
4665 	/* gen3 */
4666 	[IRDMA_HW_STAT_INDEX_RNR_SENT].name		= "RNR sent",
4667 	[IRDMA_HW_STAT_INDEX_RNR_RCVD].name		= "RNR received",
4668 	[IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT].name	= "ord limit count",
4669 	[IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT].name	= "ird limit count",
4670 	[IRDMA_HW_STAT_INDEX_RDMARXATS].name		= "Rx atomics",
4671 	[IRDMA_HW_STAT_INDEX_RDMATXATS].name		= "Tx atomics",
4672 	[IRDMA_HW_STAT_INDEX_NAKSEQERR].name		= "Nak Sequence Error",
4673 	[IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED].name	= "Nak Sequence Error Implied",
4674 	[IRDMA_HW_STAT_INDEX_RTO].name			= "RTO",
4675 	[IRDMA_HW_STAT_INDEX_RXOOOPKTS].name		= "Rcvd Out of order packets",
4676 	[IRDMA_HW_STAT_INDEX_ICRCERR].name		= "CRC errors",
4677 };
4678 
4679 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
4680 				     struct ib_port_immutable *immutable)
4681 {
4682 	struct ib_port_attr attr;
4683 	int err;
4684 
4685 	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
4686 	err = ib_query_port(ibdev, port_num, &attr);
4687 	if (err)
4688 		return err;
4689 
4690 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
4691 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
4692 	immutable->gid_tbl_len = attr.gid_tbl_len;
4693 
4694 	return 0;
4695 }
4696 
4697 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
4698 				   struct ib_port_immutable *immutable)
4699 {
4700 	struct ib_port_attr attr;
4701 	int err;
4702 
4703 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
4704 	err = ib_query_port(ibdev, port_num, &attr);
4705 	if (err)
4706 		return err;
4707 	immutable->gid_tbl_len = attr.gid_tbl_len;
4708 
4709 	return 0;
4710 }
4711 
4712 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
4713 {
4714 	struct irdma_device *iwdev = to_iwdev(dev);
4715 
4716 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
4717 		 irdma_fw_major_ver(&iwdev->rf->sc_dev),
4718 		 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
4719 }
4720 
4721 /**
4722  * irdma_alloc_hw_port_stats - Allocate a hw stats structure
4723  * @ibdev: device pointer from stack
4724  * @port_num: port number
4725  */
4726 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
4727 						       u32 port_num)
4728 {
4729 	struct irdma_device *iwdev = to_iwdev(ibdev);
4730 	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
4731 
4732 	int num_counters = dev->hw_attrs.max_stat_idx;
4733 	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
4734 
4735 	return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
4736 					  lifespan);
4737 }
4738 
4739 /**
4740  * irdma_get_hw_stats - Populates the rdma_hw_stats structure
4741  * @ibdev: device pointer from stack
4742  * @stats: stats pointer from stack
4743  * @port_num: port number
4744  * @index: which hw counter the stack is requesting we update
4745  */
4746 static int irdma_get_hw_stats(struct ib_device *ibdev,
4747 			      struct rdma_hw_stats *stats, u32 port_num,
4748 			      int index)
4749 {
4750 	struct irdma_device *iwdev = to_iwdev(ibdev);
4751 	struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
4752 
4753 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
4754 		irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
4755 	else
4756 		irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
4757 
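	/* This memcpy assumes struct irdma_dev_hw_stats lays out its counters
	 * as consecutive u64s in the same order as irdma_hw_stat_descs; the
	 * two must stay in sync.
	 */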
4758 	memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
4759 
4760 	return stats->num_counters;
4761 }
4762 
4763 /**
4764  * irdma_query_gid - Query port GID
4765  * @ibdev: device pointer from stack
4766  * @port: port number
4767  * @index: Entry index
4768  * @gid: Global ID
4769  */
4770 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
4771 			   union ib_gid *gid)
4772 {
4773 	struct irdma_device *iwdev = to_iwdev(ibdev);
4774 
4775 	memset(gid->raw, 0, sizeof(gid->raw));
4776 	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
4777 
4778 	return 0;
4779 }
4780 
4781 /**
4782  * mcast_list_add - Add a new mcast item to the list
4783  * @rf: RDMA PCI function
4784  * @new_elem: pointer to element to add
4785  */
4786 static void mcast_list_add(struct irdma_pci_f *rf,
4787 			   struct mc_table_list *new_elem)
4788 {
4789 	list_add(&new_elem->list, &rf->mc_qht_list.list);
4790 }
4791 
4792 /**
4793  * mcast_list_del - Remove an mcast item from list
4794  * @mc_qht_elem: pointer to mcast table list element
4795  */
4796 static void mcast_list_del(struct mc_table_list *mc_qht_elem)
4797 {
4798 	if (mc_qht_elem)
4799 		list_del(&mc_qht_elem->list);
4800 }
4801 
4802 /**
4803  * mcast_list_lookup_ip - Search mcast list for address
4804  * @rf: RDMA PCI function
4805  * @ip_mcast: pointer to mcast IP address
4806  */
4807 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
4808 						  u32 *ip_mcast)
4809 {
4810 	struct mc_table_list *mc_qht_el;
4811 	struct list_head *pos, *q;
4812 
4813 	list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
4814 		mc_qht_el = list_entry(pos, struct mc_table_list, list);
4815 		if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
4816 			    sizeof(mc_qht_el->mc_info.dest_ip)))
4817 			return mc_qht_el;
4818 	}
4819 
4820 	return NULL;
4821 }
4822 
4823 /**
4824  * irdma_mcast_cqp_op - perform a mcast cqp operation
4825  * @iwdev: irdma device
4826  * @mc_grp_ctx: mcast group info
4827  * @op: operation
4828  *
4829  * returns error status
4830  */
4831 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
4832 			      struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
4833 {
4834 	struct cqp_cmds_info *cqp_info;
4835 	struct irdma_cqp_request *cqp_request;
4836 	int status;
4837 
4838 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
4839 	if (!cqp_request)
4840 		return -ENOMEM;
4841 
4842 	cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
4843 	cqp_info = &cqp_request->info;
4844 	cqp_info->cqp_cmd = op;
4845 	cqp_info->post_sq = 1;
4846 	cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
4847 	cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
4848 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
4849 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
4850 
4851 	return status;
4852 }
4853 
4854 /**
4855  * irdma_mcast_mac - Get the multicast MAC for an IP address
4856  * @ip_addr: IPv4 or IPv6 address
4857  * @mac: pointer to result MAC address
4858  * @ipv4: flag indicating IPv4 or IPv6
4859  *
4860  */
4861 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
4862 {
4863 	u8 *ip = (u8 *)ip_addr;
4864 
4865 	if (ipv4) {
4866 		unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
4867 						0x00, 0x00};
4868 
4869 		mac4[3] = ip[2] & 0x7F;
4870 		mac4[4] = ip[1];
4871 		mac4[5] = ip[0];
4872 		ether_addr_copy(mac, mac4);
4873 	} else {
4874 		unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
4875 						0x00, 0x00};
4876 
4877 		mac6[2] = ip[3];
4878 		mac6[3] = ip[2];
4879 		mac6[4] = ip[1];
4880 		mac6[5] = ip[0];
4881 		ether_addr_copy(mac, mac6);
4882 	}
4883 }
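
/* Worked example (assuming the little-endian byte view this driver targets):
 * IPv4 group 224.1.2.3 is held host-order as 0xE0010203, so ip[2]/ip[1]/ip[0]
 * are 01/02/03 and the result is 01:00:5e:01:02:03, the RFC 1112 mapping of
 * the group's low 23 bits. IPv6 likewise maps the low 32 bits behind the
 * 33:33 prefix per RFC 2464.
 */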
4884 
4885 /**
4886  * irdma_attach_mcast - attach a qp to a multicast group
4887  * @ibqp: ptr to qp
4888  * @ibgid: pointer to global ID
4889  * @lid: local ID
4890  *
4891  * returns error status
4892  */
4893 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4894 {
4895 	struct irdma_qp *iwqp = to_iwqp(ibqp);
4896 	struct irdma_device *iwdev = iwqp->iwdev;
4897 	struct irdma_pci_f *rf = iwdev->rf;
4898 	struct mc_table_list *mc_qht_elem;
4899 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4900 	unsigned long flags;
4901 	u32 ip_addr[4] = {};
4902 	u32 mgn;
4903 	u32 no_mgs;
4904 	int ret = 0;
4905 	bool ipv4;
4906 	u16 vlan_id;
4907 	union irdma_sockaddr sgid_addr;
4908 	unsigned char dmac[ETH_ALEN];
4909 
4910 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4911 
4912 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
4913 		irdma_copy_ip_ntohl(ip_addr,
4914 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4915 		irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL);
4916 		ipv4 = false;
4917 		ibdev_dbg(&iwdev->ibdev,
4918 			  "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
4919 			  ip_addr);
4920 		irdma_mcast_mac(ip_addr, dmac, false);
4921 	} else {
4922 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4923 		ipv4 = true;
4924 		vlan_id = irdma_get_vlan_ipv4(ip_addr);
4925 		irdma_mcast_mac(ip_addr, dmac, true);
4926 		ibdev_dbg(&iwdev->ibdev,
4927 			  "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
4928 			  ibqp->qp_num, ip_addr, dmac);
4929 	}
4930 
4931 	spin_lock_irqsave(&rf->qh_list_lock, flags);
4932 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4933 	if (!mc_qht_elem) {
4934 		struct irdma_dma_mem *dma_mem_mc;
4935 
4936 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4937 		mc_qht_elem = kzalloc_obj(*mc_qht_elem);
4938 		if (!mc_qht_elem)
4939 			return -ENOMEM;
4940 
4941 		mc_qht_elem->mc_info.ipv4_valid = ipv4;
4942 		memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
4943 		       sizeof(mc_qht_elem->mc_info.dest_ip));
4944 		ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
4945 				       &mgn, &rf->next_mcg);
4946 		if (ret) {
4947 			kfree(mc_qht_elem);
4948 			return -ENOMEM;
4949 		}
4950 
4951 		mc_qht_elem->mc_info.mgn = mgn;
4952 		dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
4953 		dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
4954 					 IRDMA_HW_PAGE_SIZE);
4955 		dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
4956 						    dma_mem_mc->size,
4957 						    &dma_mem_mc->pa,
4958 						    GFP_KERNEL);
4959 		if (!dma_mem_mc->va) {
4960 			irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
4961 			kfree(mc_qht_elem);
4962 			return -ENOMEM;
4963 		}
4964 
4965 		mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
4966 		memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
4967 		       sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
4968 		mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
4969 		mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
4970 		if (vlan_id < VLAN_N_VID)
4971 			mc_qht_elem->mc_grp_ctx.vlan_valid = true;
4972 		mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
4973 		mc_qht_elem->mc_grp_ctx.qs_handle =
4974 			iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4975 		ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
4976 
4977 		spin_lock_irqsave(&rf->qh_list_lock, flags);
4978 		mcast_list_add(rf, mc_qht_elem);
4979 	} else {
4980 		if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
4981 		    IRDMA_MAX_MGS_PER_CTX) {
4982 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4983 			return -ENOMEM;
4984 		}
4985 	}
4986 
4987 	mcg_info.qp_id = iwqp->ibqp.qp_num;
4988 	no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
4989 	irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4990 	spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4991 
4992 	/* Only if there is a change do we need to modify or create */
4993 	if (!no_mgs) {
4994 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4995 					 IRDMA_OP_MC_CREATE);
4996 	} else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4997 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4998 					 IRDMA_OP_MC_MODIFY);
4999 	} else {
5000 		return 0;
5001 	}
5002 
5003 	if (ret)
5004 		goto error;
5005 
5006 	return 0;
5007 
5008 error:
5009 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
5010 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
5011 		mcast_list_del(mc_qht_elem);
5012 		dma_free_coherent(rf->hw.device,
5013 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
5014 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
5015 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
5016 		mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
5017 		irdma_free_rsrc(rf, rf->allocated_mcgs,
5018 				mc_qht_elem->mc_grp_ctx.mg_id);
5019 		kfree(mc_qht_elem);
5020 	}
5021 
5022 	return ret;
5023 }
5024 
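/*
 * Usage sketch (hypothetical userspace, not driver code): attach is reached
 * through ibv_attach_mcast(). The first QP joining a group allocates the
 * MCG context and issues IRDMA_OP_MC_CREATE; later joiners only issue
 * IRDMA_OP_MC_MODIFY, and at most IRDMA_MAX_MGS_PER_CTX QPs may share one
 * context:
 *
 *	union ibv_gid mgid;
 *
 *	inet_pton(AF_INET6, "ff0e::1", mgid.raw);
 *	if (ibv_attach_mcast(qp, &mgid, 0))	(LID is unused on RoCE)
 *		perror("ibv_attach_mcast");
 */
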
5025 /**
5026  * irdma_detach_mcast - detach a qp from a multicast group
5027  * @ibqp: ptr to qp
5028  * @ibgid: pointer to global ID
5029  * @lid: local ID
5030  *
5031  * returns error status
5032  */
5033 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
5034 {
5035 	struct irdma_qp *iwqp = to_iwqp(ibqp);
5036 	struct irdma_device *iwdev = iwqp->iwdev;
5037 	struct irdma_pci_f *rf = iwdev->rf;
5038 	u32 ip_addr[4] = {};
5039 	struct mc_table_list *mc_qht_elem;
5040 	struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
5041 	int ret;
5042 	unsigned long flags;
5043 	union irdma_sockaddr sgid_addr;
5044 
5045 	rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
5046 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
5047 		irdma_copy_ip_ntohl(ip_addr,
5048 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
5049 	else
5050 		ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
5051 
5052 	spin_lock_irqsave(&rf->qh_list_lock, flags);
5053 	mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
5054 	if (!mc_qht_elem) {
5055 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
5056 		ibdev_dbg(&iwdev->ibdev,
5057 			  "VERBS: address not found MCG\n");
5058 		return 0;
5059 	}
5060 
5061 	mcg_info.qp_id = iwqp->ibqp.qp_num;
5062 	irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
5063 	if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
5064 		mcast_list_del(mc_qht_elem);
5065 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
5066 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
5067 					 IRDMA_OP_MC_DESTROY);
5068 		if (ret) {
5069 			ibdev_dbg(&iwdev->ibdev,
5070 				  "VERBS: failed MC_DESTROY MCG\n");
5071 			spin_lock_irqsave(&rf->qh_list_lock, flags);
5072 			mcast_list_add(rf, mc_qht_elem);
5073 			spin_unlock_irqrestore(&rf->qh_list_lock, flags);
5074 			return -EAGAIN;
5075 		}
5076 
5077 		dma_free_coherent(rf->hw.device,
5078 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
5079 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
5080 				  mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
5081 		mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
5082 		irdma_free_rsrc(rf, rf->allocated_mcgs,
5083 				mc_qht_elem->mc_grp_ctx.mg_id);
5084 		kfree(mc_qht_elem);
5085 	} else {
5086 		spin_unlock_irqrestore(&rf->qh_list_lock, flags);
5087 		ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
5088 					 IRDMA_OP_MC_MODIFY);
5089 		if (ret) {
5090 			ibdev_dbg(&iwdev->ibdev,
5091 				  "VERBS: failed Modify MCG\n");
5092 			return ret;
5093 		}
5094 	}
5095 
5096 	return 0;
5097 }
5098 
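/*
 * Note on the error path above: if the CQP MC_DESTROY fails, the group
 * element is re-inserted into the hash list and -EAGAIN is returned, so
 * the caller can retry the detach without leaking the MCG resource.
 */
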
5099 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
5100 {
5101 	struct irdma_pci_f *rf = iwdev->rf;
5102 	int err;
5103 
5104 	err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
5105 			       &rf->next_ah);
5106 	if (err)
5107 		return err;
5108 
5109 	err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
5110 			      irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
5111 
5112 	if (err) {
5113 		ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail\n");
5114 		goto err_ah_create;
5115 	}
5116 
5117 	if (!sleep) {
5118 		const u64 tmout_ms = irdma_get_timeout_threshold(&rf->sc_dev) *
5119 			CQP_COMPL_WAIT_TIME_MS;
5120 
5121 		if (poll_timeout_us_atomic(irdma_cqp_ce_handler(rf,
5122 								&rf->ccq.sc_cq),
5123 					   ah->sc_ah.ah_info.ah_valid, 1,
5124 					   tmout_ms * USEC_PER_MSEC, false)) {
5125 			ibdev_dbg(&iwdev->ibdev,
5126 				  "VERBS: CQP create AH timed out");
5127 			err = -ETIMEDOUT;
5128 			goto err_ah_create;
5129 		}
5130 	}
5131 	return 0;
5132 
5133 err_ah_create:
5134 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
5135 
5136 	return err;
5137 }
5138 
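/*
 * Note on the !sleep path in irdma_create_hw_ah() above: when the caller
 * may not sleep (an AH created from atomic context), the CQP completion
 * interrupt cannot be waited on, so irdma_cqp_ce_handler() is driven by
 * polling until ah_valid is set or the timeout (the device's timeout
 * threshold * CQP_COMPL_WAIT_TIME_MS) expires.
 */
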
5139 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
5140 {
5141 	struct irdma_pd *pd = to_iwpd(ibah->pd);
5142 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
5143 	struct rdma_ah_attr *ah_attr = attr->ah_attr;
5144 	const struct ib_gid_attr *sgid_attr;
5145 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
5146 	struct irdma_pci_f *rf = iwdev->rf;
5147 	struct irdma_sc_ah *sc_ah;
5148 	struct irdma_ah_info *ah_info;
5149 	union irdma_sockaddr sgid_addr, dgid_addr;
5150 	int err;
5151 	u8 dmac[ETH_ALEN];
5152 
5153 	ah->pd = pd;
5154 	sc_ah = &ah->sc_ah;
5155 	sc_ah->ah_info.vsi = &iwdev->vsi;
5156 	irdma_sc_init_ah(&rf->sc_dev, sc_ah);
5157 	ah->sgid_index = ah_attr->grh.sgid_index;
5158 	sgid_attr = ah_attr->grh.sgid_attr;
5159 	memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
5160 	rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
5161 	rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
5162 	ah->av.attrs = *ah_attr;
5163 	ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
5164 	ah_info = &sc_ah->ah_info;
5165 	ah_info->pd_idx = pd->sc_pd.pd_id;
5166 	if (ah_attr->ah_flags & IB_AH_GRH) {
5167 		ah_info->flow_label = ah_attr->grh.flow_label;
5168 		ah_info->hop_ttl = ah_attr->grh.hop_limit;
5169 		ah_info->tc_tos = ah_attr->grh.traffic_class;
5170 	}
5171 
5172 	ether_addr_copy(dmac, ah_attr->roce.dmac);
5173 	if (ah->av.net_type == RDMA_NETWORK_IPV4) {
5174 		ah_info->ipv4_valid = true;
5175 		ah_info->dest_ip_addr[0] =
5176 			ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
5177 		ah_info->src_ip_addr[0] =
5178 			ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
5179 		ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
5180 						     ah_info->dest_ip_addr[0]);
5181 		if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
5182 			ah_info->do_lpbk = true;
5183 			irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
5184 		}
5185 	} else {
5186 		irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
5187 				    dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
5188 		irdma_copy_ip_ntohl(ah_info->src_ip_addr,
5189 				    sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
5190 		ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
5191 						     ah_info->dest_ip_addr);
5192 		if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
5193 			ah_info->do_lpbk = true;
5194 			irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
5195 		}
5196 	}
5197 
5198 	err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
5199 				      ah_info->mac_addr);
5200 	if (err)
5201 		return err;
5202 
5203 	ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
5204 					      ah_info->ipv4_valid, dmac);
5205 
5206 	if (ah_info->dst_arpindex == -1)
5207 		return -EINVAL;
5208 
5209 	if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
5210 		ah_info->vlan_tag = 0;
5211 
5212 	if (ah_info->vlan_tag < VLAN_N_VID) {
5213 		u8 prio = rt_tos2priority(ah_info->tc_tos);
5214 
5215 		prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
5216 
5217 		ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;
5218 		ah_info->insert_vlan_tag = true;
5219 	}
5220 
5221 	return 0;
5222 }
5223 
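/*
 * Worked example for the VLAN handling above (hypothetical values): with
 * vlan_tag = 100 from the GID entry and a priority of 5 derived from
 * tc_tos, the PCP bits land in tag[15:13]:
 *
 *	tag = 100 | (5 << VLAN_PRIO_SHIFT)	(VLAN_PRIO_SHIFT == 13)
 *	    = 0x0064 | 0xa000 = 0xa064
 */
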
5224 /**
5225  * irdma_ah_exists - Check for existing identical AH
5226  * @iwdev: irdma device
5227  * @new_ah: AH to check for
5228  *
5229  * returns true if AH is found, false if not found.
5230  */
5231 static bool irdma_ah_exists(struct irdma_device *iwdev,
5232 			    struct irdma_ah *new_ah)
5233 {
5234 	struct irdma_ah *ah;
5235 	u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
5236 		  new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
5237 		  new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
5238 		  new_ah->sc_ah.ah_info.dest_ip_addr[3];
5239 
5240 	hash_for_each_possible(iwdev->rf->ah_hash_tbl, ah, list, key) {
5241 		/* Set ah_valid and ah_id the same so memcmp can work */
5242 		new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
5243 		new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
5244 		if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
5245 			    sizeof(ah->sc_ah.ah_info))) {
5246 			refcount_inc(&ah->refcnt);
5247 			new_ah->parent_ah = ah;
5248 			return true;
5249 		}
5250 	}
5251 
5252 	return false;
5253 }
5254 
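/*
 * Illustrative sketch: the hash key is a plain XOR of the four destination
 * address words, so for an IPv4 AH (hypothetical address, only
 * dest_ip_addr[0] populated):
 *
 *	dest_ip_addr = { 0xc0a80001, 0, 0, 0 }		(192.168.0.1)
 *	key = 0xc0a80001 ^ 0 ^ 0 ^ 0 = 0xc0a80001
 *
 * Collisions merely cost extra memcmp() calls along the chain; the full
 * ah_info comparison above is what decides a real match.
 */
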
5255 /**
5256  * irdma_destroy_ah - Destroy address handle
5257  * @ibah: pointer to address handle
5258  * @ah_flags: flags for sleepable
5259  */
5260 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
5261 {
5262 	struct irdma_device *iwdev = to_iwdev(ibah->device);
5263 	struct irdma_ah *ah = to_iwah(ibah);
5264 
5265 	if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
5266 		mutex_lock(&iwdev->rf->ah_tbl_lock);
5267 		if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
5268 			mutex_unlock(&iwdev->rf->ah_tbl_lock);
5269 			return 0;
5270 		}
5271 		hash_del(&ah->parent_ah->list);
5272 		kfree(ah->parent_ah);
5273 		mutex_unlock(&iwdev->rf->ah_tbl_lock);
5274 	}
5275 
5276 	irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
5277 			false, NULL, ah);
5278 
5279 	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
5280 			ah->sc_ah.ah_info.ah_idx);
5281 
5282 	return 0;
5283 }
5284 
5285 /**
5286  * irdma_create_user_ah - create user address handle
5287  * @ibah: address handle
5288  * @attr: address handle attributes
5289  * @udata: User data
5290  *
5291  * returns 0 on success, error otherwise
5292  */
5293 static int irdma_create_user_ah(struct ib_ah *ibah,
5294 				struct rdma_ah_init_attr *attr,
5295 				struct ib_udata *udata)
5296 {
5297 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
5298 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
5299 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
5300 	struct irdma_create_ah_resp uresp = {};
5301 	struct irdma_ah *parent_ah;
5302 	int err;
5303 
5304 	if (udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
5305 		return -EINVAL;
5306 
5307 	err = irdma_setup_ah(ibah, attr);
5308 	if (err)
5309 		return err;
5310 	mutex_lock(&iwdev->rf->ah_tbl_lock);
5311 	if (!irdma_ah_exists(iwdev, ah)) {
5312 		err = irdma_create_hw_ah(iwdev, ah, true);
5313 		if (err) {
5314 			mutex_unlock(&iwdev->rf->ah_tbl_lock);
5315 			return err;
5316 		}
5317 		/* Add new AH to list */
5318 		parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
5319 		if (parent_ah) {
5320 			u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
5321 				  parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
5322 				  parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
5323 				  parent_ah->sc_ah.ah_info.dest_ip_addr[3];
5324 
5325 			ah->parent_ah = parent_ah;
5326 			hash_add(iwdev->rf->ah_hash_tbl, &parent_ah->list, key);
5327 			refcount_set(&parent_ah->refcnt, 1);
5328 		}
5329 	}
5330 	mutex_unlock(&iwdev->rf->ah_tbl_lock);
5331 
5332 	uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
5333 	err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
5334 	if (err)
5335 		irdma_destroy_ah(ibah, attr->flags);
5336 
5337 	return err;
5338 }
5339 
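/*
 * Usage sketch (hypothetical userspace): two ibv_create_ah() calls with
 * identical attributes share one hardware AH; only the first reaches the
 * CQP, the second just takes a reference on the cached parent:
 *
 *	struct ibv_ah *a = ibv_create_ah(pd, &attr);
 *	struct ibv_ah *b = ibv_create_ah(pd, &attr);
 *
 *	ibv_destroy_ah(b);	(refcnt 2 -> 1, HW AH kept)
 *	ibv_destroy_ah(a);	(refcnt 1 -> 0, IRDMA_OP_AH_DESTROY issued)
 */
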
5340 /**
5341  * irdma_create_ah - create address handle
5342  * @ibah: address handle
5343  * @attr: address handle attributes
5344  * @udata: NULL
5345  *
5346  * returns 0 on success, error otherwise
5347  */
5348 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
5349 			   struct ib_udata *udata)
5350 {
5351 	struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
5352 	struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
5353 	int err;
5354 
5355 	err = irdma_setup_ah(ibah, attr);
5356 	if (err)
5357 		return err;
5358 	err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
5359 
5360 	return err;
5361 }
5362 
5363 /**
5364  * irdma_query_ah - Query address handle
5365  * @ibah: pointer to address handle
5366  * @ah_attr: address handle attributes
5367  */
5368 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
5369 {
5370 	struct irdma_ah *ah = to_iwah(ibah);
5371 
5372 	memset(ah_attr, 0, sizeof(*ah_attr));
5373 	if (ah->av.attrs.ah_flags & IB_AH_GRH) {
5374 		ah_attr->ah_flags = IB_AH_GRH;
5375 		ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
5376 		ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
5377 		ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
5378 		ah_attr->grh.sgid_index = ah->sgid_index;
5379 		memcpy(&ah_attr->grh.dgid, &ah->dgid,
5380 		       sizeof(ah_attr->grh.dgid));
5381 	}
5382 
5383 	return 0;
5384 }
5385 
5386 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
5387 						 u32 port_num)
5388 {
5389 	return IB_LINK_LAYER_ETHERNET;
5390 }
5391 
5392 static const struct ib_device_ops irdma_gen1_dev_ops = {
5393 	.dealloc_driver = irdma_ib_dealloc_device,
5394 };
5395 
5396 static const struct ib_device_ops irdma_gen3_dev_ops = {
5397 	.alloc_mw = irdma_alloc_mw,
5398 	.create_srq = irdma_create_srq,
5399 	.dealloc_mw = irdma_dealloc_mw,
5400 	.destroy_srq = irdma_destroy_srq,
5401 	.modify_srq = irdma_modify_srq,
5402 	.post_srq_recv = irdma_post_srq_recv,
5403 	.query_srq = irdma_query_srq,
5404 };
5405 
5406 static const struct ib_device_ops irdma_roce_dev_ops = {
5407 	.attach_mcast = irdma_attach_mcast,
5408 	.create_ah = irdma_create_ah,
5409 	.create_user_ah = irdma_create_user_ah,
5410 	.destroy_ah = irdma_destroy_ah,
5411 	.detach_mcast = irdma_detach_mcast,
5412 	.get_link_layer = irdma_get_link_layer,
5413 	.get_port_immutable = irdma_roce_port_immutable,
5414 	.modify_qp = irdma_modify_qp_roce,
5415 	.query_ah = irdma_query_ah,
5416 	.query_pkey = irdma_query_pkey,
5417 };
5418 
5419 static const struct ib_device_ops irdma_iw_dev_ops = {
5420 	.get_port_immutable = irdma_iw_port_immutable,
5421 	.iw_accept = irdma_accept,
5422 	.iw_add_ref = irdma_qp_add_ref,
5423 	.iw_connect = irdma_connect,
5424 	.iw_create_listen = irdma_create_listen,
5425 	.iw_destroy_listen = irdma_destroy_listen,
5426 	.iw_get_qp = irdma_get_qp,
5427 	.iw_reject = irdma_reject,
5428 	.iw_rem_ref = irdma_qp_rem_ref,
5429 	.modify_qp = irdma_modify_qp,
5430 	.query_gid = irdma_query_gid,
5431 };
5432 
5433 static const struct ib_device_ops irdma_dev_ops = {
5434 	.owner = THIS_MODULE,
5435 	.driver_id = RDMA_DRIVER_IRDMA,
5436 	.uverbs_abi_ver = IRDMA_ABI_VER,
5437 
5438 	.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
5439 	.alloc_mr = irdma_alloc_mr,
5440 	.alloc_pd = irdma_alloc_pd,
5441 	.alloc_ucontext = irdma_alloc_ucontext,
5442 	.create_cq = irdma_create_cq,
5443 	.create_qp = irdma_create_qp,
5444 	.dealloc_driver = irdma_ib_dealloc_device,
5445 	.dealloc_mw = irdma_dealloc_mw,
5446 	.dealloc_pd = irdma_dealloc_pd,
5447 	.dealloc_ucontext = irdma_dealloc_ucontext,
5448 	.dereg_mr = irdma_dereg_mr,
5449 	.destroy_cq = irdma_destroy_cq,
5450 	.destroy_qp = irdma_destroy_qp,
5451 	.disassociate_ucontext = irdma_disassociate_ucontext,
5452 	.get_dev_fw_str = irdma_get_dev_fw_str,
5453 	.get_dma_mr = irdma_get_dma_mr,
5454 	.get_hw_stats = irdma_get_hw_stats,
5455 	.map_mr_sg = irdma_map_mr_sg,
5456 	.mmap = irdma_mmap,
5457 	.mmap_free = irdma_mmap_free,
5458 	.poll_cq = irdma_poll_cq,
5459 	.post_recv = irdma_post_recv,
5460 	.post_send = irdma_post_send,
5461 	.query_device = irdma_query_device,
5462 	.query_port = irdma_query_port,
5463 	.query_qp = irdma_query_qp,
5464 	.reg_user_mr = irdma_reg_user_mr,
5465 	.reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
5466 	.rereg_user_mr = irdma_rereg_user_mr,
5467 	.req_notify_cq = irdma_req_notify_cq,
5468 	.resize_user_cq = irdma_resize_cq,
5469 	INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
5470 	INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
5471 	INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
5472 	INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
5473 	INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
5474 	INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
5475 	INIT_RDMA_OBJ_SIZE(ib_srq, irdma_srq, ibsrq),
5476 };
5477 
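/*
 * Sketch of the INIT_RDMA_OBJ_SIZE() contract above (simplified; the real
 * allocation lives in the RDMA core): the core allocates the driver
 * container by the size recorded here and hands the driver only the
 * embedded ib_* member, which converters such as to_iwpd() recover via
 * container_of(). Roughly:
 *
 *	struct irdma_pd *pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 *	struct ib_pd *ibpd = &pd->ibpd;
 *
 *	to_iwpd(ibpd) == container_of(ibpd, struct irdma_pd, ibpd) == pd
 */
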
5478 /**
5479  * irdma_init_roce_device - initialization of roce rdma device
5480  * @iwdev: irdma device
5481  */
5482 static void irdma_init_roce_device(struct irdma_device *iwdev)
5483 {
5484 	iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
5485 	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
5486 			    iwdev->netdev->dev_addr);
5487 	ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
5488 }
5489 
5490 /**
5491  * irdma_init_iw_device - initialization of iwarp rdma device
5492  * @iwdev: irdma device
5493  */
5494 static void irdma_init_iw_device(struct irdma_device *iwdev)
5495 {
5496 	struct net_device *netdev = iwdev->netdev;
5497 
5498 	iwdev->ibdev.node_type = RDMA_NODE_RNIC;
5499 	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
5500 			    netdev->dev_addr);
5501 	memcpy(iwdev->ibdev.iw_ifname, netdev->name,
5502 	       sizeof(iwdev->ibdev.iw_ifname));
5503 	ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
5504 }
5505 
5506 /**
5507  * irdma_init_rdma_device - initialization of rdma device
5508  * @iwdev: irdma device
5509  */
5510 static void irdma_init_rdma_device(struct irdma_device *iwdev)
5511 {
5512 	struct pci_dev *pcidev = iwdev->rf->pcidev;
5513 
5514 	if (iwdev->roce_mode)
5515 		irdma_init_roce_device(iwdev);
5516 	else
5517 		irdma_init_iw_device(iwdev);
5518 
5519 	iwdev->ibdev.phys_port_cnt = 1;
5520 	iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
5521 	iwdev->ibdev.dev.parent = &pcidev->dev;
5522 	ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
5523 	if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
5524 		ib_set_device_ops(&iwdev->ibdev, &irdma_gen1_dev_ops);
5525 	if (iwdev->rf->rdma_ver >= IRDMA_GEN_3)
5526 		ib_set_device_ops(&iwdev->ibdev, &irdma_gen3_dev_ops);
5527 }
5528 
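/*
 * Note on the ib_set_device_ops() ordering above: a callback is never
 * overwritten once set, so the protocol table installed first wins.
 * modify_qp, for instance, resolves to irdma_modify_qp_roce in RoCE mode
 * and to irdma_modify_qp for iWARP, while the gen1/gen3 tables only fill
 * in callbacks that are still unset.
 */
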
5529 /**
5530  * irdma_port_ibevent - indicate port event
5531  * @iwdev: irdma device
5532  */
5533 void irdma_port_ibevent(struct irdma_device *iwdev)
5534 {
5535 	struct ib_event event;
5536 
5537 	event.device = &iwdev->ibdev;
5538 	event.element.port_num = 1;
5539 	event.event =
5540 		iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
5541 	ib_dispatch_event(&event);
5542 }
5543 
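/*
 * Usage sketch (hypothetical userspace): the dispatched event surfaces as
 * IBV_EVENT_PORT_ACTIVE or IBV_EVENT_PORT_ERR on the async channel:
 *
 *	struct ibv_async_event ev;
 *
 *	if (!ibv_get_async_event(ctx, &ev)) {
 *		if (ev.event_type == IBV_EVENT_PORT_ACTIVE)
 *			printf("port %u up\n", ev.element.port_num);
 *		ibv_ack_async_event(&ev);
 *	}
 */
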
5544 /**
5545  * irdma_ib_unregister_device - unregister rdma device from IB
5546  * core
5547  * @iwdev: irdma device
5548  */
5549 void irdma_ib_unregister_device(struct irdma_device *iwdev)
5550 {
5551 	iwdev->iw_status = 0;
5552 	irdma_port_ibevent(iwdev);
5553 	ib_unregister_device(&iwdev->ibdev);
5554 }
5555 
5556 /**
5557  * irdma_ib_register_device - register irdma device to IB core
5558  * @iwdev: irdma device
5559  */
5560 int irdma_ib_register_device(struct irdma_device *iwdev)
5561 {
5562 	int ret;
5563 
5564 	irdma_init_rdma_device(iwdev);
5565 
5566 	ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
5567 	if (ret)
5568 		goto error;
5569 	dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
5570 	ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
5571 	if (ret)
5572 		goto error;
5573 
5574 	iwdev->iw_status = 1;
5575 	irdma_port_ibevent(iwdev);
5576 
5577 	return 0;
5578 
5579 error:
5580 	if (ret)
5581 		ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
5582 
5583 	return ret;
5584 }
5585 
5586 /**
5587  * irdma_ib_dealloc_device - deallocate device resources
5588  * @ibdev: ib device
5589  *
5590  * callback from ibdev dealloc_driver to deallocate resources
5591  * under irdma device
5592  */
5593 void irdma_ib_dealloc_device(struct ib_device *ibdev)
5594 {
5595 	struct irdma_device *iwdev = to_iwdev(ibdev);
5596 
5597 	irdma_rt_deinit_hw(iwdev);
5598 	if (!iwdev->is_vport) {
5599 		irdma_ctrl_deinit_hw(iwdev->rf);
5600 		if (iwdev->rf->vchnl_wq) {
5601 			destroy_workqueue(iwdev->rf->vchnl_wq);
5602 			mutex_destroy(&iwdev->rf->sc_dev.vchnl_mutex);
5603 		}
5604 	}
5605 }
5606