1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4
5 /**
6 * irdma_query_device - get device attributes
7 * @ibdev: device pointer from stack
8 * @props: returning device attributes
9 * @udata: user data
10 */
11 static int irdma_query_device(struct ib_device *ibdev,
12 struct ib_device_attr *props,
13 struct ib_udata *udata)
14 {
15 struct irdma_device *iwdev = to_iwdev(ibdev);
16 struct irdma_pci_f *rf = iwdev->rf;
17 struct pci_dev *pcidev = iwdev->rf->pcidev;
18 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
19
20 if (udata->inlen || udata->outlen)
21 return -EINVAL;
22
23 memset(props, 0, sizeof(*props));
24 addrconf_addr_eui48((u8 *)&props->sys_image_guid,
25 iwdev->netdev->dev_addr);
26 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
27 irdma_fw_minor_ver(&rf->sc_dev);
28 props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
29 IB_DEVICE_MEM_MGT_EXTENSIONS;
30 props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
31 props->vendor_id = pcidev->vendor;
32 props->vendor_part_id = pcidev->device;
33
34 props->hw_ver = rf->pcidev->revision;
35 props->page_size_cap = hw_attrs->page_size_cap;
36 props->max_mr_size = hw_attrs->max_mr_size;
37 props->max_qp = rf->max_qp - rf->used_qps;
38 props->max_qp_wr = hw_attrs->max_qp_wr;
39 props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
40 props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
41 props->max_cq = rf->max_cq - rf->used_cqs;
42 props->max_cqe = rf->max_cqe - 1;
43 props->max_mr = rf->max_mr - rf->used_mrs;
44 props->max_mw = props->max_mr;
45 props->max_pd = rf->max_pd - rf->used_pds;
46 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
47 props->max_qp_rd_atom = hw_attrs->max_hw_ird;
48 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
49 if (rdma_protocol_roce(ibdev, 1)) {
50 props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
51 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
52 }
53
54 props->max_ah = rf->max_ah;
55 props->max_mcast_grp = rf->max_mcg;
56 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
57 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
58 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
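/* 17-bit mask of valid completion timestamp bits, advertised only for GEN_2 and later hardware */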
59 #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
60 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
61 props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
62
63 return 0;
64 }
65
66 /**
67 * irdma_query_port - get port attributes
68 * @ibdev: device pointer from stack
69 * @port: port number for query
70 * @props: returning device attributes
71 */
72 static int irdma_query_port(struct ib_device *ibdev, u32 port,
73 struct ib_port_attr *props)
74 {
75 struct irdma_device *iwdev = to_iwdev(ibdev);
76 struct net_device *netdev = iwdev->netdev;
77
78 /* no need to zero out props here. done by caller */
79
80 props->max_mtu = IB_MTU_4096;
81 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
82 props->lid = 1;
83 props->lmc = 0;
84 props->sm_lid = 0;
85 props->sm_sl = 0;
86 if (netif_carrier_ok(netdev) && netif_running(netdev)) {
87 props->state = IB_PORT_ACTIVE;
88 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
89 } else {
90 props->state = IB_PORT_DOWN;
91 props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
92 }
93
94 ib_get_eth_speed(ibdev, port, &props->active_speed,
95 &props->active_width);
96
97 if (rdma_protocol_roce(ibdev, 1)) {
98 props->gid_tbl_len = 32;
99 props->ip_gids = true;
100 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
101 } else {
102 props->gid_tbl_len = 1;
103 }
104 props->qkey_viol_cntr = 0;
105 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
106 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
107
108 return 0;
109 }
110
111 /**
112 * irdma_disassociate_ucontext - Disassociate user context
113 * @context: ib user context
114 */
115 static void irdma_disassociate_ucontext(struct ib_ucontext *context)
116 {
117 }
118
119 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
120 struct vm_area_struct *vma)
121 {
122 u64 pfn;
123
124 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
125 return -EINVAL;
126
127 vma->vm_private_data = ucontext;
128 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
129 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
130
131 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
132 pgprot_noncached(vma->vm_page_prot), NULL);
133 }
134
135 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
136 {
137 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
138
139 kfree(entry);
140 }
141
142 static struct rdma_user_mmap_entry*
143 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
144 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
145 {
146 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
147 int ret;
148
149 if (!entry)
150 return NULL;
151
152 entry->bar_offset = bar_offset;
153 entry->mmap_flag = mmap_flag;
154
155 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
156 &entry->rdma_entry, PAGE_SIZE);
157 if (ret) {
158 kfree(entry);
159 return NULL;
160 }
161 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
162
163 return &entry->rdma_entry;
164 }
165
166 /**
167 * irdma_mmap - user memory map
168 * @context: context created during alloc
169 * @vma: kernel info for user memory map
170 */
171 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
172 {
173 struct rdma_user_mmap_entry *rdma_entry;
174 struct irdma_user_mmap_entry *entry;
175 struct irdma_ucontext *ucontext;
176 u64 pfn;
177 int ret;
178
179 ucontext = to_ucontext(context);
180
181 /* Legacy support for libi40iw with hard-coded mmap key */
182 if (ucontext->legacy_mode)
183 return irdma_mmap_legacy(ucontext, vma);
184
185 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
186 if (!rdma_entry) {
187 ibdev_dbg(&ucontext->iwdev->ibdev,
188 "VERBS: pgoff[0x%lx] does not have valid entry\n",
189 vma->vm_pgoff);
190 return -EINVAL;
191 }
192
193 entry = to_irdma_mmap_entry(rdma_entry);
194 ibdev_dbg(&ucontext->iwdev->ibdev,
195 "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
196 entry->bar_offset, entry->mmap_flag);
197
198 pfn = (entry->bar_offset +
199 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
200
201 switch (entry->mmap_flag) {
202 case IRDMA_MMAP_IO_NC:
203 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
204 pgprot_noncached(vma->vm_page_prot),
205 rdma_entry);
206 break;
207 case IRDMA_MMAP_IO_WC:
208 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
209 pgprot_writecombine(vma->vm_page_prot),
210 rdma_entry);
211 break;
212 default:
213 ret = -EINVAL;
214 }
215
216 if (ret)
217 ibdev_dbg(&ucontext->iwdev->ibdev,
218 "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
219 entry->bar_offset, entry->mmap_flag, ret);
220 rdma_user_mmap_entry_put(rdma_entry);
221
222 return ret;
223 }
224
225 /**
226 * irdma_alloc_push_page - allocate a push page for qp
227 * @iwqp: qp pointer
228 */
229 static void irdma_alloc_push_page(struct irdma_qp *iwqp)
230 {
231 struct irdma_cqp_request *cqp_request;
232 struct cqp_cmds_info *cqp_info;
233 struct irdma_device *iwdev = iwqp->iwdev;
234 struct irdma_sc_qp *qp = &iwqp->sc_qp;
235 int status;
236
237 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
238 if (!cqp_request)
239 return;
240
241 cqp_info = &cqp_request->info;
242 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
243 cqp_info->post_sq = 1;
244 cqp_info->in.u.manage_push_page.info.push_idx = 0;
245 cqp_info->in.u.manage_push_page.info.qs_handle =
246 qp->vsi->qos[qp->user_pri].qs_handle;
247 cqp_info->in.u.manage_push_page.info.free_page = 0;
248 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
249 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
250 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
251
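/* on success the CQP returns the assigned push page index in op_ret_val */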
252 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
253 if (!status && cqp_request->compl_info.op_ret_val <
254 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
255 qp->push_idx = cqp_request->compl_info.op_ret_val;
256 qp->push_offset = 0;
257 }
258
259 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
260 }
261
262 /**
263 * irdma_alloc_ucontext - Allocate the user context data structure
264 * @uctx: uverbs context pointer
265 * @udata: user data
266 *
267 * This keeps track of all objects associated with a particular
268 * user-mode client.
269 */
270 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
271 struct ib_udata *udata)
272 {
273 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
274 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
275 struct ib_device *ibdev = uctx->device;
276 struct irdma_device *iwdev = to_iwdev(ibdev);
277 struct irdma_alloc_ucontext_req req = {};
278 struct irdma_alloc_ucontext_resp uresp = {};
279 struct irdma_ucontext *ucontext = to_ucontext(uctx);
280 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
281
282 if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
283 udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
284 return -EINVAL;
285
286 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
287 return -EINVAL;
288
289 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
290 goto ver_error;
291
292 ucontext->iwdev = iwdev;
293 ucontext->abi_ver = req.userspace_ver;
294
295 if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
296 ucontext->use_raw_attrs = true;
297
298 /* GEN_1 legacy support with libi40iw */
299 if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
300 if (uk_attrs->hw_rev != IRDMA_GEN_1)
301 return -EOPNOTSUPP;
302
303 ucontext->legacy_mode = true;
304 uresp.max_qps = iwdev->rf->max_qp;
305 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
306 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
307 uresp.kernel_ver = req.userspace_ver;
308 if (ib_copy_to_udata(udata, &uresp,
309 min(sizeof(uresp), udata->outlen)))
310 return -EFAULT;
311 } else {
312 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
313
314 ucontext->db_mmap_entry =
315 irdma_user_mmap_entry_insert(ucontext, bar_off,
316 IRDMA_MMAP_IO_NC,
317 &uresp.db_mmap_key);
318 if (!ucontext->db_mmap_entry)
319 return -ENOMEM;
320
321 uresp.kernel_ver = IRDMA_ABI_VER;
322 uresp.feature_flags = uk_attrs->feature_flags;
323 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
324 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
325 uresp.max_hw_inline = uk_attrs->max_hw_inline;
326 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
327 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
328 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
329 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
330 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
331 uresp.hw_rev = uk_attrs->hw_rev;
332 uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
333 uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
334 uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
335 if (ib_copy_to_udata(udata, &uresp,
336 min(sizeof(uresp), udata->outlen))) {
337 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
338 return -EFAULT;
339 }
340 }
341
342 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
343 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
344 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
345 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
346
347 return 0;
348
349 ver_error:
350 ibdev_err(&iwdev->ibdev,
351 "Invalid userspace driver version detected. Detected version %d, should be %d\n",
352 req.userspace_ver, IRDMA_ABI_VER);
353 return -EINVAL;
354 }
355
356 /**
357 * irdma_dealloc_ucontext - deallocate the user context data structure
358 * @context: user context created during alloc
359 */
360 static void irdma_dealloc_ucontext(struct ib_ucontext *context)
361 {
362 struct irdma_ucontext *ucontext = to_ucontext(context);
363
364 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
365 }
366
367 /**
368 * irdma_alloc_pd - allocate protection domain
369 * @pd: PD pointer
370 * @udata: user data
371 */
372 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
373 {
374 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
375 struct irdma_pd *iwpd = to_iwpd(pd);
376 struct irdma_device *iwdev = to_iwdev(pd->device);
377 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
378 struct irdma_pci_f *rf = iwdev->rf;
379 struct irdma_alloc_pd_resp uresp = {};
380 struct irdma_sc_pd *sc_pd;
381 u32 pd_id = 0;
382 int err;
383
384 if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
385 return -EINVAL;
386
387 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
388 &rf->next_pd);
389 if (err)
390 return err;
391
392 sc_pd = &iwpd->sc_pd;
393 if (udata) {
394 struct irdma_ucontext *ucontext =
395 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
396 ibucontext);
397 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
398 uresp.pd_id = pd_id;
399 if (ib_copy_to_udata(udata, &uresp,
400 min(sizeof(uresp), udata->outlen))) {
401 err = -EFAULT;
402 goto error;
403 }
404 } else {
405 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
406 }
407
408 return 0;
409 error:
410 irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
411
412 return err;
413 }
414
415 /**
416 * irdma_dealloc_pd - deallocate pd
417 * @ibpd: ptr of pd to be deallocated
418 * @udata: user data
419 */
420 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
421 {
422 struct irdma_pd *iwpd = to_iwpd(ibpd);
423 struct irdma_device *iwdev = to_iwdev(ibpd->device);
424
425 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
426
427 return 0;
428 }
429
430 /**
431 * irdma_get_pbl - Retrieve pbl from a list given a virtual
432 * address
433 * @va: user virtual address
434 * @pbl_list: pbl list to search in (QP's or CQ's)
435 */
436 static struct irdma_pbl *irdma_get_pbl(unsigned long va,
437 struct list_head *pbl_list)
438 {
439 struct irdma_pbl *iwpbl;
440
441 list_for_each_entry (iwpbl, pbl_list, list) {
442 if (iwpbl->user_base == va) {
443 list_del(&iwpbl->list);
444 iwpbl->on_list = false;
445 return iwpbl;
446 }
447 }
448
449 return NULL;
450 }
451
452 /**
453 * irdma_clean_cqes - clean cq entries for qp
454 * @iwqp: qp ptr (user or kernel)
455 * @iwcq: cq ptr
456 */
457 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
458 {
459 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
460 unsigned long flags;
461
462 spin_lock_irqsave(&iwcq->lock, flags);
463 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
464 spin_unlock_irqrestore(&iwcq->lock, flags);
465 }
466
467 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
468 {
469 if (iwqp->push_db_mmap_entry) {
470 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
471 iwqp->push_db_mmap_entry = NULL;
472 }
473 if (iwqp->push_wqe_mmap_entry) {
474 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
475 iwqp->push_wqe_mmap_entry = NULL;
476 }
477 }
478
479 static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
480 struct irdma_qp *iwqp,
481 u64 *push_wqe_mmap_key,
482 u64 *push_db_mmap_key)
483 {
484 struct irdma_device *iwdev = ucontext->iwdev;
485 u64 rsvd, bar_off;
486
487 rsvd = IRDMA_PF_BAR_RSVD;
488 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
489 /* skip over db page */
490 bar_off += IRDMA_HW_PAGE_SIZE;
491 /* push wqe page */
492 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
493 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
494 bar_off, IRDMA_MMAP_IO_WC,
495 push_wqe_mmap_key);
496 if (!iwqp->push_wqe_mmap_entry)
497 return -ENOMEM;
498
499 /* push doorbell page */
500 bar_off += IRDMA_HW_PAGE_SIZE;
501 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
502 bar_off, IRDMA_MMAP_IO_NC,
503 push_db_mmap_key);
504 if (!iwqp->push_db_mmap_entry) {
505 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
506 return -ENOMEM;
507 }
508
509 return 0;
510 }
511
512 /**
513 * irdma_destroy_qp - destroy qp
514 * @ibqp: qp's ib pointer also to get to device's qp address
515 * @udata: user data
516 */
517 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
518 {
519 struct irdma_qp *iwqp = to_iwqp(ibqp);
520 struct irdma_device *iwdev = iwqp->iwdev;
521
522 iwqp->sc_qp.qp_uk.destroy_pending = true;
523
524 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
525 irdma_modify_qp_to_err(&iwqp->sc_qp);
526
527 if (!iwqp->user_mode)
528 cancel_delayed_work_sync(&iwqp->dwork_flush);
529
530 if (!iwqp->user_mode) {
531 if (iwqp->iwscq) {
532 irdma_clean_cqes(iwqp, iwqp->iwscq);
533 if (iwqp->iwrcq != iwqp->iwscq)
534 irdma_clean_cqes(iwqp, iwqp->iwrcq);
535 }
536 }
537
538 irdma_qp_rem_ref(&iwqp->ibqp);
539 wait_for_completion(&iwqp->free_qp);
540 irdma_free_lsmm_rsrc(iwqp);
541 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
542
543 irdma_remove_push_mmap_entries(iwqp);
544 irdma_free_qp_rsrc(iwqp);
545
546 return 0;
547 }
548
549 /**
550 * irdma_setup_virt_qp - setup for allocation of virtual qp
551 * @iwdev: irdma device
552 * @iwqp: qp ptr
553 * @init_info: initialize info to return
554 */
555 static void irdma_setup_virt_qp(struct irdma_device *iwdev,
556 struct irdma_qp *iwqp,
557 struct irdma_qp_init_info *init_info)
558 {
559 struct irdma_pbl *iwpbl = iwqp->iwpbl;
560 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
561
562 iwqp->page = qpmr->sq_page;
563 init_info->shadow_area_pa = qpmr->shadow;
564 if (iwpbl->pbl_allocated) {
565 init_info->virtual_map = true;
566 init_info->sq_pa = qpmr->sq_pbl.idx;
567 init_info->rq_pa = qpmr->rq_pbl.idx;
568 } else {
569 init_info->sq_pa = qpmr->sq_pbl.addr;
570 init_info->rq_pa = qpmr->rq_pbl.addr;
571 }
572 }
573
574 /**
575 * irdma_setup_umode_qp - setup sq and rq size in user mode qp
576 * @udata: udata
577 * @iwdev: iwarp device
578 * @iwqp: qp ptr (user or kernel)
579 * @info: initialize info to return
580 * @init_attr: Initial QP create attributes
581 */
582 static int irdma_setup_umode_qp(struct ib_udata *udata,
583 struct irdma_device *iwdev,
584 struct irdma_qp *iwqp,
585 struct irdma_qp_init_info *info,
586 struct ib_qp_init_attr *init_attr)
587 {
588 struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
589 struct irdma_ucontext, ibucontext);
590 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
591 struct irdma_create_qp_req req;
592 unsigned long flags;
593 int ret;
594
595 ret = ib_copy_from_udata(&req, udata,
596 min(sizeof(req), udata->inlen));
597 if (ret) {
598 ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_udata fail\n");
599 return ret;
600 }
601
602 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
603 iwqp->user_mode = 1;
604 if (req.user_wqe_bufs) {
605 info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
606 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
607 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
608 &ucontext->qp_reg_mem_list);
609 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
610
611 if (!iwqp->iwpbl) {
612 ret = -ENODATA;
613 ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
614 return ret;
615 }
616 }
617
618 if (!ucontext->use_raw_attrs) {
619 /**
620 * Maintain backward compat with older ABI which passes sq and
621 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
622 * There is no way to compute the correct value of
623 * iwqp->max_send_wr/max_recv_wr in the kernel.
624 */
625 iwqp->max_send_wr = init_attr->cap.max_send_wr;
626 iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
627 ukinfo->sq_size = init_attr->cap.max_send_wr;
628 ukinfo->rq_size = init_attr->cap.max_recv_wr;
629 irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
630 &ukinfo->rq_shift);
631 } else {
632 ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
633 &ukinfo->sq_shift);
634 if (ret)
635 return ret;
636
637 ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
638 &ukinfo->rq_shift);
639 if (ret)
640 return ret;
641
642 iwqp->max_send_wr =
643 (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
644 iwqp->max_recv_wr =
645 (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
646 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
647 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
648 }
649
650 irdma_setup_virt_qp(iwdev, iwqp, info);
651
652 return 0;
653 }
654
655 /**
656 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
657 * @iwdev: iwarp device
658 * @iwqp: qp ptr (user or kernel)
659 * @info: initialize info to return
660 * @init_attr: Initial QP create attributes
661 */
662 static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
663 struct irdma_qp *iwqp,
664 struct irdma_qp_init_info *info,
665 struct ib_qp_init_attr *init_attr)
666 {
667 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
668 u32 size;
669 int status;
670 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
671
672 status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
673 &ukinfo->sq_shift);
674 if (status)
675 return status;
676
677 status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
678 &ukinfo->rq_shift);
679 if (status)
680 return status;
681
682 iwqp->kqp.sq_wrid_mem =
683 kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
684 if (!iwqp->kqp.sq_wrid_mem)
685 return -ENOMEM;
686
687 iwqp->kqp.rq_wrid_mem =
688 kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
689
690 if (!iwqp->kqp.rq_wrid_mem) {
691 kfree(iwqp->kqp.sq_wrid_mem);
692 iwqp->kqp.sq_wrid_mem = NULL;
693 return -ENOMEM;
694 }
695
696 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
697 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
698
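/* one DMA buffer holds the SQ WQEs, then the RQ WQEs, then the shadow area; IRDMA_SHADOW_AREA_SIZE is in 8-byte units, hence the shift */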
699 size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
700 size += (IRDMA_SHADOW_AREA_SIZE << 3);
701
702 mem->size = ALIGN(size, 256);
703 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
704 &mem->pa, GFP_KERNEL);
705 if (!mem->va) {
706 kfree(iwqp->kqp.sq_wrid_mem);
707 iwqp->kqp.sq_wrid_mem = NULL;
708 kfree(iwqp->kqp.rq_wrid_mem);
709 iwqp->kqp.rq_wrid_mem = NULL;
710 return -ENOMEM;
711 }
712
713 ukinfo->sq = mem->va;
714 info->sq_pa = mem->pa;
715 ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
716 info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
717 ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
718 info->shadow_area_pa =
719 info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
720 ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
721 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
722
723 iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
724 iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
725 init_attr->cap.max_send_wr = iwqp->max_send_wr;
726 init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
727
728 return 0;
729 }
730
731 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
732 {
733 struct irdma_pci_f *rf = iwqp->iwdev->rf;
734 struct irdma_cqp_request *cqp_request;
735 struct cqp_cmds_info *cqp_info;
736 struct irdma_create_qp_info *qp_info;
737 int status;
738
739 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
740 if (!cqp_request)
741 return -ENOMEM;
742
743 cqp_info = &cqp_request->info;
744 qp_info = &cqp_request->info.in.u.qp_create.info;
745 memset(qp_info, 0, sizeof(*qp_info));
746 qp_info->mac_valid = true;
747 qp_info->cq_num_valid = true;
748 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
749
750 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
751 cqp_info->post_sq = 1;
752 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
753 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
754 status = irdma_handle_cqp_op(rf, cqp_request);
755 irdma_put_cqp_request(&rf->cqp, cqp_request);
756
757 return status;
758 }
759
760 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
761 struct irdma_qp_host_ctx_info *ctx_info)
762 {
763 struct irdma_device *iwdev = iwqp->iwdev;
764 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
765 struct irdma_roce_offload_info *roce_info;
766 struct irdma_udp_offload_info *udp_info;
767
768 udp_info = &iwqp->udp_info;
769 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
770 udp_info->cwnd = iwdev->roce_cwnd;
771 udp_info->rexmit_thresh = 2;
772 udp_info->rnr_nak_thresh = 2;
773 udp_info->src_port = 0xc000;
774 udp_info->dst_port = ROCE_V2_UDP_DPORT;
775 roce_info = &iwqp->roce_info;
776 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
777
778 roce_info->rd_en = true;
779 roce_info->wr_rdresp_en = true;
780 roce_info->bind_en = true;
781 roce_info->dcqcn_en = false;
782 roce_info->rtomin = 5;
783
784 roce_info->ack_credits = iwdev->roce_ackcreds;
785 roce_info->ird_size = dev->hw_attrs.max_hw_ird;
786 roce_info->ord_size = dev->hw_attrs.max_hw_ord;
787
788 if (!iwqp->user_mode) {
789 roce_info->priv_mode_en = true;
790 roce_info->fast_reg_en = true;
791 roce_info->udprivcq_en = true;
792 }
793 roce_info->roce_tver = 0;
794
795 ctx_info->roce_info = &iwqp->roce_info;
796 ctx_info->udp_info = &iwqp->udp_info;
797 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
798 }
799
800 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
801 struct irdma_qp_host_ctx_info *ctx_info)
802 {
803 struct irdma_device *iwdev = iwqp->iwdev;
804 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
805 struct irdma_iwarp_offload_info *iwarp_info;
806
807 iwarp_info = &iwqp->iwarp_info;
808 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
809 iwarp_info->rd_en = true;
810 iwarp_info->wr_rdresp_en = true;
811 iwarp_info->bind_en = true;
812 iwarp_info->ecn_en = true;
813 iwarp_info->rtomin = 5;
814
815 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
816 iwarp_info->ib_rd_en = true;
817 if (!iwqp->user_mode) {
818 iwarp_info->priv_mode_en = true;
819 iwarp_info->fast_reg_en = true;
820 }
821 iwarp_info->ddp_ver = 1;
822 iwarp_info->rdmap_ver = 1;
823
824 ctx_info->iwarp_info = &iwqp->iwarp_info;
825 ctx_info->iwarp_info_valid = true;
826 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
827 ctx_info->iwarp_info_valid = false;
828 }
829
830 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
831 struct irdma_device *iwdev)
832 {
833 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
834 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
835
836 if (init_attr->create_flags)
837 return -EOPNOTSUPP;
838
839 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
840 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
841 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
842 init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
843 init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
844 return -EINVAL;
845
846 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
847 if (init_attr->qp_type != IB_QPT_RC &&
848 init_attr->qp_type != IB_QPT_UD &&
849 init_attr->qp_type != IB_QPT_GSI)
850 return -EOPNOTSUPP;
851 } else {
852 if (init_attr->qp_type != IB_QPT_RC)
853 return -EOPNOTSUPP;
854 }
855
856 return 0;
857 }
858
859 static void irdma_flush_worker(struct work_struct *work)
860 {
861 struct delayed_work *dwork = to_delayed_work(work);
862 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
863
864 irdma_generate_flush_completions(iwqp);
865 }
866
867 /**
868 * irdma_create_qp - create qp
869 * @ibqp: ptr of qp
870 * @init_attr: attributes for qp
871 * @udata: user data for create qp
872 */
873 static int irdma_create_qp(struct ib_qp *ibqp,
874 struct ib_qp_init_attr *init_attr,
875 struct ib_udata *udata)
876 {
877 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
878 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
879 struct ib_pd *ibpd = ibqp->pd;
880 struct irdma_pd *iwpd = to_iwpd(ibpd);
881 struct irdma_device *iwdev = to_iwdev(ibpd->device);
882 struct irdma_pci_f *rf = iwdev->rf;
883 struct irdma_qp *iwqp = to_iwqp(ibqp);
884 struct irdma_create_qp_resp uresp = {};
885 u32 qp_num = 0;
886 int err_code;
887 struct irdma_sc_qp *qp;
888 struct irdma_sc_dev *dev = &rf->sc_dev;
889 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
890 struct irdma_qp_init_info init_info = {};
891 struct irdma_qp_host_ctx_info *ctx_info;
892
893 err_code = irdma_validate_qp_attrs(init_attr, iwdev);
894 if (err_code)
895 return err_code;
896
897 if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
898 udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
899 return -EINVAL;
900
901 init_info.vsi = &iwdev->vsi;
902 init_info.qp_uk_init_info.uk_attrs = uk_attrs;
903 init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
904 init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
905 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
906 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
907 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
908
909 qp = &iwqp->sc_qp;
910 qp->qp_uk.back_qp = iwqp;
911 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
912
913 iwqp->iwdev = iwdev;
914 iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
915 256);
916 iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
917 iwqp->q2_ctx_mem.size,
918 &iwqp->q2_ctx_mem.pa,
919 GFP_KERNEL);
920 if (!iwqp->q2_ctx_mem.va)
921 return -ENOMEM;
922
923 init_info.q2 = iwqp->q2_ctx_mem.va;
924 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
925 init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
926 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
927
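/* QP 1 is reserved for the GSI QP; all other QPs take an id from the driver's QP resource pool */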
928 if (init_attr->qp_type == IB_QPT_GSI)
929 qp_num = 1;
930 else
931 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
932 &qp_num, &rf->next_qp);
933 if (err_code)
934 goto error;
935
936 iwqp->iwpd = iwpd;
937 iwqp->ibqp.qp_num = qp_num;
938 qp = &iwqp->sc_qp;
939 iwqp->iwscq = to_iwcq(init_attr->send_cq);
940 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
941 iwqp->host_ctx.va = init_info.host_ctx;
942 iwqp->host_ctx.pa = init_info.host_ctx_pa;
943 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
944
945 init_info.pd = &iwpd->sc_pd;
946 init_info.qp_uk_init_info.qp_id = qp_num;
947 if (!rdma_protocol_roce(&iwdev->ibdev, 1))
948 init_info.qp_uk_init_info.first_sq_wq = 1;
949 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
950 init_waitqueue_head(&iwqp->waitq);
951 init_waitqueue_head(&iwqp->mod_qp_waitq);
952
953 if (udata) {
954 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
955 err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
956 init_attr);
957 } else {
958 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
959 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
960 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
961 }
962
963 if (err_code) {
964 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
965 goto error;
966 }
967
968 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
969 if (init_attr->qp_type == IB_QPT_RC) {
970 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
971 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
972 IRDMA_WRITE_WITH_IMM |
973 IRDMA_ROCE;
974 } else {
975 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
976 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
977 IRDMA_ROCE;
978 }
979 } else {
980 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
981 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
982 }
983
984 if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
985 init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
986
987 err_code = irdma_sc_qp_init(qp, &init_info);
988 if (err_code) {
989 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
990 goto error;
991 }
992
993 ctx_info = &iwqp->ctx_info;
994 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
995 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
996
997 if (rdma_protocol_roce(&iwdev->ibdev, 1))
998 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
999 else
1000 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
1001
1002 err_code = irdma_cqp_create_qp_cmd(iwqp);
1003 if (err_code)
1004 goto error;
1005
1006 refcount_set(&iwqp->refcnt, 1);
1007 spin_lock_init(&iwqp->lock);
1008 spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
1009 iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1010 rf->qp_table[qp_num] = iwqp;
1011
1012 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
1013 if (dev->ws_add(&iwdev->vsi, 0)) {
1014 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
1015 err_code = -EINVAL;
1016 goto error;
1017 }
1018
1019 irdma_qp_add_qos(&iwqp->sc_qp);
1020 }
1021
1022 if (udata) {
1023 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
1024 if (udata->outlen < sizeof(uresp)) {
1025 uresp.lsmm = 1;
1026 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
1027 } else {
1028 if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
1029 uresp.lsmm = 1;
1030 }
1031 uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
1032 uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
1033 uresp.qp_id = qp_num;
1034 uresp.qp_caps = qp->qp_uk.qp_caps;
1035
1036 err_code = ib_copy_to_udata(udata, &uresp,
1037 min(sizeof(uresp), udata->outlen));
1038 if (err_code) {
1039 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
1040 irdma_destroy_qp(&iwqp->ibqp, udata);
1041 return err_code;
1042 }
1043 }
1044
1045 init_completion(&iwqp->free_qp);
1046 return 0;
1047
1048 error:
1049 irdma_free_qp_rsrc(iwqp);
1050 return err_code;
1051 }
1052
1053 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1054 {
1055 int acc_flags = 0;
1056
1057 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1058 if (iwqp->roce_info.wr_rdresp_en) {
1059 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1060 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1061 }
1062 if (iwqp->roce_info.rd_en)
1063 acc_flags |= IB_ACCESS_REMOTE_READ;
1064 if (iwqp->roce_info.bind_en)
1065 acc_flags |= IB_ACCESS_MW_BIND;
1066 } else {
1067 if (iwqp->iwarp_info.wr_rdresp_en) {
1068 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1069 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1070 }
1071 if (iwqp->iwarp_info.rd_en)
1072 acc_flags |= IB_ACCESS_REMOTE_READ;
1073 if (iwqp->iwarp_info.bind_en)
1074 acc_flags |= IB_ACCESS_MW_BIND;
1075 }
1076 return acc_flags;
1077 }
1078
1079 /**
1080 * irdma_query_qp - query qp attributes
1081 * @ibqp: qp pointer
1082 * @attr: attributes pointer
1083 * @attr_mask: Not used
1084 * @init_attr: qp attributes to return
1085 */
1086 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1087 int attr_mask, struct ib_qp_init_attr *init_attr)
1088 {
1089 struct irdma_qp *iwqp = to_iwqp(ibqp);
1090 struct irdma_sc_qp *qp = &iwqp->sc_qp;
1091
1092 memset(attr, 0, sizeof(*attr));
1093 memset(init_attr, 0, sizeof(*init_attr));
1094
1095 attr->qp_state = iwqp->ibqp_state;
1096 attr->cur_qp_state = iwqp->ibqp_state;
1097 attr->cap.max_send_wr = iwqp->max_send_wr;
1098 attr->cap.max_recv_wr = iwqp->max_recv_wr;
1099 attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
1100 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
1101 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
1102 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1103 attr->port_num = 1;
1104 if (rdma_protocol_roce(ibqp->device, 1)) {
1105 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1106 attr->qkey = iwqp->roce_info.qkey;
1107 attr->rq_psn = iwqp->udp_info.epsn;
1108 attr->sq_psn = iwqp->udp_info.psn_nxt;
1109 attr->dest_qp_num = iwqp->roce_info.dest_qp;
1110 attr->pkey_index = iwqp->roce_info.p_key;
1111 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1112 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1113 attr->max_rd_atomic = iwqp->roce_info.ord_size;
1114 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1115 }
1116
1117 init_attr->event_handler = iwqp->ibqp.event_handler;
1118 init_attr->qp_context = iwqp->ibqp.qp_context;
1119 init_attr->send_cq = iwqp->ibqp.send_cq;
1120 init_attr->recv_cq = iwqp->ibqp.recv_cq;
1121 init_attr->cap = attr->cap;
1122
1123 return 0;
1124 }
1125
1126 /**
1127 * irdma_query_pkey - Query partition key
1128 * @ibdev: device pointer from stack
1129 * @port: port number
1130 * @index: index of pkey
1131 * @pkey: pointer to store the pkey
1132 */
1133 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1134 u16 *pkey)
1135 {
1136 if (index >= IRDMA_PKEY_TBL_SZ)
1137 return -EINVAL;
1138
1139 *pkey = IRDMA_DEFAULT_PKEY;
1140 return 0;
1141 }
1142
1143 static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
1144 {
1145 struct net_device *ndev;
1146
1147 rcu_read_lock();
1148 ndev = rcu_dereference(attr->ndev);
1149 if (!ndev)
1150 goto exit;
1151 if (is_vlan_dev(ndev)) {
1152 u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);
1153
1154 prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1155 }
1156 exit:
1157 rcu_read_unlock();
1158 return prio;
1159 }
1160
1161 static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
1162 {
1163 if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
1164 !iwqp->suspend_pending,
1165 msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
1166 iwqp->suspend_pending = false;
1167 ibdev_warn(&iwqp->iwdev->ibdev,
1168 "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
1169 iwqp->ibqp.qp_num, iwqp->last_aeq);
1170 return -EBUSY;
1171 }
1172
1173 return 0;
1174 }
1175
1176 /**
1177 * irdma_modify_qp_roce - modify qp request
1178 * @ibqp: qp's pointer for modify
1179 * @attr: access attributes
1180 * @attr_mask: state mask
1181 * @udata: user data
1182 */
1183 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1184 int attr_mask, struct ib_udata *udata)
1185 {
1186 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1187 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1188 struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
1189 struct irdma_qp *iwqp = to_iwqp(ibqp);
1190 struct irdma_device *iwdev = iwqp->iwdev;
1191 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1192 struct irdma_qp_host_ctx_info *ctx_info;
1193 struct irdma_roce_offload_info *roce_info;
1194 struct irdma_udp_offload_info *udp_info;
1195 struct irdma_modify_qp_info info = {};
1196 struct irdma_modify_qp_resp uresp = {};
1197 struct irdma_modify_qp_req ureq = {};
1198 unsigned long flags;
1199 u8 issue_modify_qp = 0;
1200 int ret = 0;
1201
1202 ctx_info = &iwqp->ctx_info;
1203 roce_info = &iwqp->roce_info;
1204 udp_info = &iwqp->udp_info;
1205
1206 if (udata) {
1207 /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1208 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1209 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1210 return -EINVAL;
1211 }
1212
1213 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1214 return -EOPNOTSUPP;
1215
1216 if (attr_mask & IB_QP_DEST_QPN)
1217 roce_info->dest_qp = attr->dest_qp_num;
1218
1219 if (attr_mask & IB_QP_PKEY_INDEX) {
1220 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
1221 &roce_info->p_key);
1222 if (ret)
1223 return ret;
1224 }
1225
1226 if (attr_mask & IB_QP_QKEY)
1227 roce_info->qkey = attr->qkey;
1228
1229 if (attr_mask & IB_QP_PATH_MTU)
1230 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
1231
1232 if (attr_mask & IB_QP_SQ_PSN) {
1233 udp_info->psn_nxt = attr->sq_psn;
1234 udp_info->lsn = 0xffff;
1235 udp_info->psn_una = attr->sq_psn;
1236 udp_info->psn_max = attr->sq_psn;
1237 }
1238
1239 if (attr_mask & IB_QP_RQ_PSN)
1240 udp_info->epsn = attr->rq_psn;
1241
1242 if (attr_mask & IB_QP_RNR_RETRY)
1243 udp_info->rnr_nak_thresh = attr->rnr_retry;
1244
1245 if (attr_mask & IB_QP_RETRY_CNT)
1246 udp_info->rexmit_thresh = attr->retry_cnt;
1247
1248 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
1249
1250 if (attr_mask & IB_QP_AV) {
1251 struct irdma_av *av = &iwqp->roce_ah.av;
1252 const struct ib_gid_attr *sgid_attr =
1253 attr->ah_attr.grh.sgid_attr;
1254 u16 vlan_id = VLAN_N_VID;
1255 u32 local_ip[4];
1256
1257 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1258 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1259 udp_info->ttl = attr->ah_attr.grh.hop_limit;
1260 udp_info->flow_label = attr->ah_attr.grh.flow_label;
1261 udp_info->tos = attr->ah_attr.grh.traffic_class;
1262 udp_info->src_port =
1263 rdma_get_udp_sport(udp_info->flow_label,
1264 ibqp->qp_num,
1265 roce_info->dest_qp);
1266 irdma_qp_rem_qos(&iwqp->sc_qp);
1267 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1268 if (iwqp->sc_qp.vsi->dscp_mode)
1269 ctx_info->user_pri =
1270 iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
1271 else
1272 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1273 }
1274 ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
1275 ctx_info->roce_info->mac_addr);
1276 if (ret)
1277 return ret;
1278 ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
1279 ctx_info->user_pri);
1280 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1281 return -ENOMEM;
1282 iwqp->sc_qp.user_pri = ctx_info->user_pri;
1283 irdma_qp_add_qos(&iwqp->sc_qp);
1284
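/* no VLAN from the GID: fall back to priority tagging (VLAN ID 0) when DCB VLAN mode is enabled */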
1285 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1286 vlan_id = 0;
1287 if (vlan_id < VLAN_N_VID) {
1288 udp_info->insert_vlan_tag = true;
1289 udp_info->vlan_tag = vlan_id |
1290 ctx_info->user_pri << VLAN_PRIO_SHIFT;
1291 } else {
1292 udp_info->insert_vlan_tag = false;
1293 }
1294
1295 av->attrs = attr->ah_attr;
1296 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
1297 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1298 av->net_type = rdma_gid_attr_network_type(sgid_attr);
1299 if (av->net_type == RDMA_NETWORK_IPV6) {
1300 __be32 *daddr =
1301 av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1302 __be32 *saddr =
1303 av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1304
1305 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1306 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1307
1308 udp_info->ipv4 = false;
1309 irdma_copy_ip_ntohl(local_ip, daddr);
1310
1311 } else if (av->net_type == RDMA_NETWORK_IPV4) {
1312 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1313 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1314
1315 local_ip[0] = ntohl(daddr);
1316
1317 udp_info->ipv4 = true;
1318 udp_info->dest_ip_addr[0] = 0;
1319 udp_info->dest_ip_addr[1] = 0;
1320 udp_info->dest_ip_addr[2] = 0;
1321 udp_info->dest_ip_addr[3] = local_ip[0];
1322
1323 udp_info->local_ipaddr[0] = 0;
1324 udp_info->local_ipaddr[1] = 0;
1325 udp_info->local_ipaddr[2] = 0;
1326 udp_info->local_ipaddr[3] = ntohl(saddr);
1327 }
1328 udp_info->arp_idx =
1329 irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1330 attr->ah_attr.roce.dmac);
1331 }
1332
1333 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1334 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1335 ibdev_err(&iwdev->ibdev,
1336 "rd_atomic = %d, above max_hw_ord=%d\n",
1337 attr->max_rd_atomic,
1338 dev->hw_attrs.max_hw_ord);
1339 return -EINVAL;
1340 }
1341 if (attr->max_rd_atomic)
1342 roce_info->ord_size = attr->max_rd_atomic;
1343 info.ord_valid = true;
1344 }
1345
1346 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1347 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1348 ibdev_err(&iwdev->ibdev,
1349 "rd_atomic = %d, above max_hw_ird=%d\n",
1350 attr->max_dest_rd_atomic,
1351 dev->hw_attrs.max_hw_ird);
1352 return -EINVAL;
1353 }
1354 if (attr->max_dest_rd_atomic)
1355 roce_info->ird_size = attr->max_dest_rd_atomic;
1356 }
1357
1358 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1359 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1360 roce_info->wr_rdresp_en = true;
1361 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1362 roce_info->wr_rdresp_en = true;
1363 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1364 roce_info->rd_en = true;
1365 }
1366
1367 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1368
1369 ibdev_dbg(&iwdev->ibdev,
1370 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1371 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1372 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1373
1374 spin_lock_irqsave(&iwqp->lock, flags);
1375 if (attr_mask & IB_QP_STATE) {
1376 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1377 iwqp->ibqp.qp_type, attr_mask)) {
1378 ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1379 iwqp->ibqp.qp_num, iwqp->ibqp_state,
1380 attr->qp_state);
1381 ret = -EINVAL;
1382 goto exit;
1383 }
1384 info.curr_iwarp_state = iwqp->iwarp_state;
1385
1386 switch (attr->qp_state) {
1387 case IB_QPS_INIT:
1388 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1389 ret = -EINVAL;
1390 goto exit;
1391 }
1392
1393 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1394 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1395 issue_modify_qp = 1;
1396 }
1397 break;
1398 case IB_QPS_RTR:
1399 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1400 ret = -EINVAL;
1401 goto exit;
1402 }
1403 info.arp_cache_idx_valid = true;
1404 info.cq_num_valid = true;
1405 info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1406 issue_modify_qp = 1;
1407 break;
1408 case IB_QPS_RTS:
1409 if (iwqp->ibqp_state < IB_QPS_RTR ||
1410 iwqp->ibqp_state == IB_QPS_ERR) {
1411 ret = -EINVAL;
1412 goto exit;
1413 }
1414
1415 info.arp_cache_idx_valid = true;
1416 info.cq_num_valid = true;
1417 info.ord_valid = true;
1418 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1419 issue_modify_qp = 1;
1420 if (iwdev->push_mode && udata &&
1421 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1422 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1423 spin_unlock_irqrestore(&iwqp->lock, flags);
1424 irdma_alloc_push_page(iwqp);
1425 spin_lock_irqsave(&iwqp->lock, flags);
1426 }
1427 break;
1428 case IB_QPS_SQD:
1429 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1430 goto exit;
1431
1432 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1433 ret = -EINVAL;
1434 goto exit;
1435 }
1436
1437 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1438 issue_modify_qp = 1;
1439 iwqp->suspend_pending = true;
1440 break;
1441 case IB_QPS_SQE:
1442 case IB_QPS_ERR:
1443 case IB_QPS_RESET:
1444 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1445 spin_unlock_irqrestore(&iwqp->lock, flags);
1446 if (udata && udata->inlen) {
1447 if (ib_copy_from_udata(&ureq, udata,
1448 min(sizeof(ureq), udata->inlen)))
1449 return -EINVAL;
1450
1451 irdma_flush_wqes(iwqp,
1452 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1453 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1454 IRDMA_REFLUSH);
1455 }
1456 return 0;
1457 }
1458
1459 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1460 issue_modify_qp = 1;
1461 break;
1462 default:
1463 ret = -EINVAL;
1464 goto exit;
1465 }
1466
1467 iwqp->ibqp_state = attr->qp_state;
1468 }
1469
1470 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1471 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1472 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1473 spin_unlock_irqrestore(&iwqp->lock, flags);
1474
1475 if (attr_mask & IB_QP_STATE) {
1476 if (issue_modify_qp) {
1477 ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1478 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1479 return -EINVAL;
1480 if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
1481 ret = irdma_wait_for_suspend(iwqp);
1482 if (ret)
1483 return ret;
1484 }
1485 spin_lock_irqsave(&iwqp->lock, flags);
1486 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1487 iwqp->iwarp_state = info.next_iwarp_state;
1488 iwqp->ibqp_state = attr->qp_state;
1489 }
1490 if (iwqp->ibqp_state > IB_QPS_RTS &&
1491 !iwqp->flush_issued) {
1492 spin_unlock_irqrestore(&iwqp->lock, flags);
1493 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1494 IRDMA_FLUSH_RQ |
1495 IRDMA_FLUSH_WAIT);
1496 iwqp->flush_issued = 1;
1497 } else {
1498 spin_unlock_irqrestore(&iwqp->lock, flags);
1499 }
1500 } else {
1501 iwqp->ibqp_state = attr->qp_state;
1502 }
1503 if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1504 struct irdma_ucontext *ucontext;
1505
1506 ucontext = rdma_udata_to_drv_context(udata,
1507 struct irdma_ucontext, ibucontext);
1508 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1509 !iwqp->push_wqe_mmap_entry &&
1510 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1511 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1512 uresp.push_valid = 1;
1513 uresp.push_offset = iwqp->sc_qp.push_offset;
1514 }
1515 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1516 udata->outlen));
1517 if (ret) {
1518 irdma_remove_push_mmap_entries(iwqp);
1519 ibdev_dbg(&iwdev->ibdev,
1520 "VERBS: copy_to_udata failed\n");
1521 return ret;
1522 }
1523 }
1524 }
1525
1526 return 0;
1527 exit:
1528 spin_unlock_irqrestore(&iwqp->lock, flags);
1529
1530 return ret;
1531 }
1532
1533 /**
1534 * irdma_modify_qp - modify qp request
1535 * @ibqp: qp's pointer for modify
1536 * @attr: access attributes
1537 * @attr_mask: state mask
1538 * @udata: user data
1539 */
1540 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1541 struct ib_udata *udata)
1542 {
1543 #define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
1544 #define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
1545 struct irdma_qp *iwqp = to_iwqp(ibqp);
1546 struct irdma_device *iwdev = iwqp->iwdev;
1547 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1548 struct irdma_qp_host_ctx_info *ctx_info;
1549 struct irdma_tcp_offload_info *tcp_info;
1550 struct irdma_iwarp_offload_info *offload_info;
1551 struct irdma_modify_qp_info info = {};
1552 struct irdma_modify_qp_resp uresp = {};
1553 struct irdma_modify_qp_req ureq = {};
1554 u8 issue_modify_qp = 0;
1555 u8 dont_wait = 0;
1556 int err;
1557 unsigned long flags;
1558
1559 if (udata) {
1560 /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
1561 if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
1562 (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
1563 return -EINVAL;
1564 }
1565
1566 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1567 return -EOPNOTSUPP;
1568
1569 ctx_info = &iwqp->ctx_info;
1570 offload_info = &iwqp->iwarp_info;
1571 tcp_info = &iwqp->tcp_info;
1572 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1573 ibdev_dbg(&iwdev->ibdev,
1574 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1575 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1576 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1577 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1578
1579 spin_lock_irqsave(&iwqp->lock, flags);
1580 if (attr_mask & IB_QP_STATE) {
1581 info.curr_iwarp_state = iwqp->iwarp_state;
1582 switch (attr->qp_state) {
1583 case IB_QPS_INIT:
1584 case IB_QPS_RTR:
1585 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1586 err = -EINVAL;
1587 goto exit;
1588 }
1589
1590 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1591 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1592 issue_modify_qp = 1;
1593 }
1594 if (iwdev->push_mode && udata &&
1595 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1596 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1597 spin_unlock_irqrestore(&iwqp->lock, flags);
1598 irdma_alloc_push_page(iwqp);
1599 spin_lock_irqsave(&iwqp->lock, flags);
1600 }
1601 break;
1602 case IB_QPS_RTS:
1603 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1604 !iwqp->cm_id) {
1605 err = -EINVAL;
1606 goto exit;
1607 }
1608
1609 issue_modify_qp = 1;
1610 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1611 iwqp->hte_added = 1;
1612 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1613 info.tcp_ctx_valid = true;
1614 info.ord_valid = true;
1615 info.arp_cache_idx_valid = true;
1616 info.cq_num_valid = true;
1617 break;
1618 case IB_QPS_SQD:
1619 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1620 err = 0;
1621 goto exit;
1622 }
1623
1624 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1625 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1626 err = 0;
1627 goto exit;
1628 }
1629
1630 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1631 err = -EINVAL;
1632 goto exit;
1633 }
1634
1635 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1636 issue_modify_qp = 1;
1637 break;
1638 case IB_QPS_SQE:
1639 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1640 err = -EINVAL;
1641 goto exit;
1642 }
1643
1644 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1645 issue_modify_qp = 1;
1646 break;
1647 case IB_QPS_ERR:
1648 case IB_QPS_RESET:
1649 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1650 spin_unlock_irqrestore(&iwqp->lock, flags);
1651 if (udata && udata->inlen) {
1652 if (ib_copy_from_udata(&ureq, udata,
1653 min(sizeof(ureq), udata->inlen)))
1654 return -EINVAL;
1655
1656 irdma_flush_wqes(iwqp,
1657 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1658 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1659 IRDMA_REFLUSH);
1660 }
1661 return 0;
1662 }
1663
1664 if (iwqp->sc_qp.term_flags) {
1665 spin_unlock_irqrestore(&iwqp->lock, flags);
1666 irdma_terminate_del_timer(&iwqp->sc_qp);
1667 spin_lock_irqsave(&iwqp->lock, flags);
1668 }
1669 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1670 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1671 iwdev->iw_status &&
1672 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1673 info.reset_tcp_conn = true;
1674 else
1675 dont_wait = 1;
1676
1677 issue_modify_qp = 1;
1678 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1679 break;
1680 default:
1681 err = -EINVAL;
1682 goto exit;
1683 }
1684
1685 iwqp->ibqp_state = attr->qp_state;
1686 }
1687 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1688 ctx_info->iwarp_info_valid = true;
1689 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1690 offload_info->wr_rdresp_en = true;
1691 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1692 offload_info->wr_rdresp_en = true;
1693 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1694 offload_info->rd_en = true;
1695 }
1696
1697 if (ctx_info->iwarp_info_valid) {
1698 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1699 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1700 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1701 }
1702 spin_unlock_irqrestore(&iwqp->lock, flags);
1703
1704 if (attr_mask & IB_QP_STATE) {
1705 if (issue_modify_qp) {
1706 ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1707 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1708 return -EINVAL;
1709 }
1710
1711 spin_lock_irqsave(&iwqp->lock, flags);
1712 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1713 iwqp->iwarp_state = info.next_iwarp_state;
1714 iwqp->ibqp_state = attr->qp_state;
1715 }
1716 spin_unlock_irqrestore(&iwqp->lock, flags);
1717 }
1718
1719 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1720 if (dont_wait) {
1721 if (iwqp->hw_tcp_state) {
1722 spin_lock_irqsave(&iwqp->lock, flags);
1723 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1724 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1725 spin_unlock_irqrestore(&iwqp->lock, flags);
1726 }
1727 irdma_cm_disconn(iwqp);
1728 } else {
1729 int close_timer_started;
1730
1731 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1732
1733 if (iwqp->cm_node) {
1734 refcount_inc(&iwqp->cm_node->refcnt);
1735 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1736 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1737 if (iwqp->cm_id && close_timer_started == 1)
1738 irdma_schedule_cm_timer(iwqp->cm_node,
1739 (struct irdma_puda_buf *)iwqp,
1740 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1741
1742 irdma_rem_ref_cm_node(iwqp->cm_node);
1743 } else {
1744 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1745 }
1746 }
1747 }
1748 if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
1749 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1750 struct irdma_ucontext *ucontext;
1751
1752 ucontext = rdma_udata_to_drv_context(udata,
1753 struct irdma_ucontext, ibucontext);
1754 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1755 !iwqp->push_wqe_mmap_entry &&
1756 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1757 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1758 uresp.push_valid = 1;
1759 uresp.push_offset = iwqp->sc_qp.push_offset;
1760 }
1761
1762 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1763 udata->outlen));
1764 if (err) {
1765 irdma_remove_push_mmap_entries(iwqp);
1766 ibdev_dbg(&iwdev->ibdev,
1767 "VERBS: copy_to_udata failed\n");
1768 return err;
1769 }
1770 }
1771
1772 return 0;
1773 exit:
1774 spin_unlock_irqrestore(&iwqp->lock, flags);
1775
1776 return err;
1777 }
1778
1779 /**
1780 * irdma_cq_free_rsrc - free up resources for cq
1781 * @rf: RDMA PCI function
1782 * @iwcq: cq ptr
1783 */
1784 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1785 {
1786 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1787
1788 if (!iwcq->user_mode) {
1789 dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1790 iwcq->kmem.va, iwcq->kmem.pa);
1791 iwcq->kmem.va = NULL;
1792 dma_free_coherent(rf->sc_dev.hw->device,
1793 iwcq->kmem_shadow.size,
1794 iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1795 iwcq->kmem_shadow.va = NULL;
1796 }
1797
1798 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1799 }
1800
1801 /**
1802 * irdma_free_cqbuf - worker to free a cq buffer
1803 * @work: provides access to the cq buffer to free
1804 */
1805 static void irdma_free_cqbuf(struct work_struct *work)
1806 {
1807 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1808
1809 dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1810 cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1811 cq_buf->kmem_buf.va = NULL;
1812 kfree(cq_buf);
1813 }
1814
1815 /**
1816 * irdma_process_resize_list - remove resized cq buffers from the resize_list
1817 * @iwcq: cq which owns the resize_list
1818 * @iwdev: irdma device
1819 * @lcqe_buf: the buffer where the last cqe is received
1820 */
1821 static int irdma_process_resize_list(struct irdma_cq *iwcq,
1822 struct irdma_device *iwdev,
1823 struct irdma_cq_buf *lcqe_buf)
1824 {
1825 struct list_head *tmp_node, *list_node;
1826 struct irdma_cq_buf *cq_buf;
1827 int cnt = 0;
1828
1829 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1830 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1831 if (cq_buf == lcqe_buf)
1832 return cnt;
1833
1834 list_del(&cq_buf->list);
1835 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1836 cnt++;
1837 }
1838
1839 return cnt;
1840 }
1841
1842 /**
1843 * irdma_destroy_cq - destroy cq
1844 * @ib_cq: cq pointer
1845 * @udata: user data
1846 */
1847 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1848 {
1849 struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1850 struct irdma_cq *iwcq = to_iwcq(ib_cq);
1851 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1852 struct irdma_sc_dev *dev = cq->dev;
1853 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1854 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1855 unsigned long flags;
1856
1857 spin_lock_irqsave(&iwcq->lock, flags);
1858 if (!list_empty(&iwcq->cmpl_generated))
1859 irdma_remove_cmpls_list(iwcq);
1860 if (!list_empty(&iwcq->resize_list))
1861 irdma_process_resize_list(iwcq, iwdev, NULL);
1862 spin_unlock_irqrestore(&iwcq->lock, flags);
1863
1864 irdma_cq_rem_ref(ib_cq);
1865 wait_for_completion(&iwcq->free_cq);
1866
1867 irdma_cq_wq_destroy(iwdev->rf, cq);
1868
1869 spin_lock_irqsave(&iwceq->ce_lock, flags);
1870 irdma_sc_cleanup_ceqes(cq, ceq);
1871 spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1872 irdma_cq_free_rsrc(iwdev->rf, iwcq);
1873
1874 return 0;
1875 }
1876
1877 /**
1878 * irdma_resize_cq - resize cq
1879 * @ibcq: cq to be resized
1880 * @entries: desired cq size
1881 * @udata: user data
1882 */
1883 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
1884 struct ib_udata *udata)
1885 {
1886 #define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
1887 struct irdma_cq *iwcq = to_iwcq(ibcq);
1888 struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1889 struct irdma_cqp_request *cqp_request;
1890 struct cqp_cmds_info *cqp_info;
1891 struct irdma_modify_cq_info *m_info;
1892 struct irdma_modify_cq_info info = {};
1893 struct irdma_dma_mem kmem_buf;
1894 struct irdma_cq_mr *cqmr_buf;
1895 struct irdma_pbl *iwpbl_buf;
1896 struct irdma_device *iwdev;
1897 struct irdma_pci_f *rf;
1898 struct irdma_cq_buf *cq_buf = NULL;
1899 unsigned long flags;
1900 int ret;
1901
1902 iwdev = to_iwdev(ibcq->device);
1903 rf = iwdev->rf;
1904
1905 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1906 IRDMA_FEATURE_CQ_RESIZE))
1907 return -EOPNOTSUPP;
1908
1909 if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
1910 return -EINVAL;
1911
1912 if (entries > rf->max_cqe)
1913 return -EINVAL;
1914
1915 if (!iwcq->user_mode) {
1916 entries++;
1917 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1918 entries *= 2;
1919 }
1920
1921 info.cq_size = max(entries, 4);
1922
1923 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1924 return 0;
1925
1926 if (udata) {
1927 struct irdma_resize_cq_req req = {};
1928 struct irdma_ucontext *ucontext =
1929 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1930 ibucontext);
1931
1932 /* CQ resize not supported with legacy GEN_1 libi40iw */
1933 if (ucontext->legacy_mode)
1934 return -EOPNOTSUPP;
1935
1936 if (ib_copy_from_udata(&req, udata,
1937 min(sizeof(req), udata->inlen)))
1938 return -EINVAL;
1939
1940 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1941 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1942 &ucontext->cq_reg_mem_list);
1943 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1944
1945 if (!iwpbl_buf)
1946 return -ENOMEM;
1947
1948 cqmr_buf = &iwpbl_buf->cq_mr;
1949 if (iwpbl_buf->pbl_allocated) {
1950 info.virtual_map = true;
1951 info.pbl_chunk_size = 1;
1952 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1953 } else {
1954 info.cq_pa = cqmr_buf->cq_pbl.addr;
1955 }
1956 } else {
1957 /* Kmode CQ resize */
1958 int rsize;
1959
1960 rsize = info.cq_size * sizeof(struct irdma_cqe);
1961 kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
1962 kmem_buf.va = dma_alloc_coherent(dev->hw->device,
1963 kmem_buf.size, &kmem_buf.pa,
1964 GFP_KERNEL);
1965 if (!kmem_buf.va)
1966 return -ENOMEM;
1967
1968 info.cq_base = kmem_buf.va;
1969 info.cq_pa = kmem_buf.pa;
1970 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1971 if (!cq_buf) {
1972 ret = -ENOMEM;
1973 goto error;
1974 }
1975 }
1976
1977 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1978 if (!cqp_request) {
1979 ret = -ENOMEM;
1980 goto error;
1981 }
1982
1983 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1984 info.cq_resize = true;
1985
1986 cqp_info = &cqp_request->info;
1987 m_info = &cqp_info->in.u.cq_modify.info;
1988 memcpy(m_info, &info, sizeof(*m_info));
1989
1990 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1991 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1992 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1993 cqp_info->post_sq = 1;
1994 ret = irdma_handle_cqp_op(rf, cqp_request);
1995 irdma_put_cqp_request(&rf->cqp, cqp_request);
1996 if (ret)
1997 goto error;
1998
1999 spin_lock_irqsave(&iwcq->lock, flags);
2000 if (cq_buf) {
2001 cq_buf->kmem_buf = iwcq->kmem;
2002 cq_buf->hw = dev->hw;
2003 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
2004 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
2005 list_add_tail(&cq_buf->list, &iwcq->resize_list);
2006 iwcq->kmem = kmem_buf;
2007 }
2008
2009 irdma_sc_cq_resize(&iwcq->sc_cq, &info);
2010 ibcq->cqe = info.cq_size - 1;
2011 spin_unlock_irqrestore(&iwcq->lock, flags);
2012
2013 return 0;
2014 error:
2015 if (!udata) {
2016 dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
2017 kmem_buf.pa);
2018 kmem_buf.va = NULL;
2019 }
2020 kfree(cq_buf);
2021
2022 return ret;
2023 }
2024
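/**
 * cq_validate_flags - validate CQ create flags against the hardware revision
 * @flags: CQ create flags
 * @hw_rev: hardware revision
 */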
2025 static inline int cq_validate_flags(u32 flags, u8 hw_rev)
2026 {
2027 /* GEN1 does not support CQ create flags */
2028 if (hw_rev == IRDMA_GEN_1)
2029 return flags ? -EOPNOTSUPP : 0;
2030
2031 return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
2032 }
2033
2034 /**
2035 * irdma_create_cq - create cq
2036 * @ibcq: CQ allocated
2037 * @attr: attributes for cq
2038 * @attrs: uverbs attribute bundle
2039 */
2040 static int irdma_create_cq(struct ib_cq *ibcq,
2041 const struct ib_cq_init_attr *attr,
2042 struct uverbs_attr_bundle *attrs)
2043 {
2044 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
2045 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
2046 struct ib_udata *udata = &attrs->driver_udata;
2047 struct ib_device *ibdev = ibcq->device;
2048 struct irdma_device *iwdev = to_iwdev(ibdev);
2049 struct irdma_pci_f *rf = iwdev->rf;
2050 struct irdma_cq *iwcq = to_iwcq(ibcq);
2051 u32 cq_num = 0;
2052 struct irdma_sc_cq *cq;
2053 struct irdma_sc_dev *dev = &rf->sc_dev;
2054 struct irdma_cq_init_info info = {};
2055 struct irdma_cqp_request *cqp_request;
2056 struct cqp_cmds_info *cqp_info;
2057 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
2058 unsigned long flags;
2059 int err_code;
2060 int entries = attr->cqe;
2061
2062 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
2063 if (err_code)
2064 return err_code;
2065
2066 if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
2067 udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
2068 return -EINVAL;
2069
2070 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
2071 &rf->next_cq);
2072 if (err_code)
2073 return err_code;
2074
2075 cq = &iwcq->sc_cq;
2076 cq->back_cq = iwcq;
2077 refcount_set(&iwcq->refcnt, 1);
2078 spin_lock_init(&iwcq->lock);
2079 INIT_LIST_HEAD(&iwcq->resize_list);
2080 INIT_LIST_HEAD(&iwcq->cmpl_generated);
2081 info.dev = dev;
2082 ukinfo->cq_size = max(entries, 4);
2083 ukinfo->cq_id = cq_num;
2084 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
2085 if (attr->comp_vector < rf->ceqs_count)
2086 info.ceq_id = attr->comp_vector;
2087 info.ceq_id_valid = true;
2088 info.ceqe_mask = 1;
2089 info.type = IRDMA_CQ_TYPE_IWARP;
2090 info.vsi = &iwdev->vsi;
2091
2092 if (udata) {
2093 struct irdma_ucontext *ucontext;
2094 struct irdma_create_cq_req req = {};
2095 struct irdma_cq_mr *cqmr;
2096 struct irdma_pbl *iwpbl;
2097 struct irdma_pbl *iwpbl_shadow;
2098 struct irdma_cq_mr *cqmr_shadow;
2099
2100 iwcq->user_mode = true;
2101 ucontext =
2102 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2103 ibucontext);
2104 if (ib_copy_from_udata(&req, udata,
2105 min(sizeof(req), udata->inlen))) {
2106 err_code = -EFAULT;
2107 goto cq_free_rsrc;
2108 }
2109
2110 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2111 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
2112 &ucontext->cq_reg_mem_list);
2113 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2114 if (!iwpbl) {
2115 err_code = -EPROTO;
2116 goto cq_free_rsrc;
2117 }
2118
2119 iwcq->iwpbl = iwpbl;
2120 iwcq->cq_mem_size = 0;
2121 cqmr = &iwpbl->cq_mr;
2122
2123 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2124 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2125 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2126 iwpbl_shadow = irdma_get_pbl(
2127 (unsigned long)req.user_shadow_area,
2128 &ucontext->cq_reg_mem_list);
2129 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2130
2131 if (!iwpbl_shadow) {
2132 err_code = -EPROTO;
2133 goto cq_free_rsrc;
2134 }
2135 iwcq->iwpbl_shadow = iwpbl_shadow;
2136 cqmr_shadow = &iwpbl_shadow->cq_mr;
2137 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2138 cqmr->split = true;
2139 } else {
2140 info.shadow_area_pa = cqmr->shadow;
2141 }
2142 if (iwpbl->pbl_allocated) {
2143 info.virtual_map = true;
2144 info.pbl_chunk_size = 1;
2145 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2146 } else {
2147 info.cq_base_pa = cqmr->cq_pbl.addr;
2148 }
2149 } else {
2150 /* Kmode allocations */
2151 int rsize;
2152
2153 if (entries < 1 || entries > rf->max_cqe) {
2154 err_code = -EINVAL;
2155 goto cq_free_rsrc;
2156 }
2157
2158 entries++;
2159 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2160 entries *= 2;
2161 ukinfo->cq_size = entries;
2162
2163 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2164 iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2165 iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2166 iwcq->kmem.size,
2167 &iwcq->kmem.pa, GFP_KERNEL);
2168 if (!iwcq->kmem.va) {
2169 err_code = -ENOMEM;
2170 goto cq_free_rsrc;
2171 }
2172
2173 iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2174 64);
2175 iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2176 iwcq->kmem_shadow.size,
2177 &iwcq->kmem_shadow.pa,
2178 GFP_KERNEL);
2179 if (!iwcq->kmem_shadow.va) {
2180 err_code = -ENOMEM;
2181 goto cq_free_rsrc;
2182 }
2183 info.shadow_area_pa = iwcq->kmem_shadow.pa;
2184 ukinfo->shadow_area = iwcq->kmem_shadow.va;
2185 ukinfo->cq_base = iwcq->kmem.va;
2186 info.cq_base_pa = iwcq->kmem.pa;
2187 }
2188
2189 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2190 (u32)IRDMA_MAX_CQ_READ_THRESH);
2191
2192 if (irdma_sc_cq_init(cq, &info)) {
2193 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2194 err_code = -EPROTO;
2195 goto cq_free_rsrc;
2196 }
2197
2198 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2199 if (!cqp_request) {
2200 err_code = -ENOMEM;
2201 goto cq_free_rsrc;
2202 }
2203
2204 cqp_info = &cqp_request->info;
2205 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2206 cqp_info->post_sq = 1;
2207 cqp_info->in.u.cq_create.cq = cq;
2208 cqp_info->in.u.cq_create.check_overflow = true;
2209 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2210 err_code = irdma_handle_cqp_op(rf, cqp_request);
2211 irdma_put_cqp_request(&rf->cqp, cqp_request);
2212 if (err_code)
2213 goto cq_free_rsrc;
2214
2215 if (udata) {
2216 struct irdma_create_cq_resp resp = {};
2217
2218 resp.cq_id = info.cq_uk_init_info.cq_id;
2219 resp.cq_size = info.cq_uk_init_info.cq_size;
2220 if (ib_copy_to_udata(udata, &resp,
2221 min(sizeof(resp), udata->outlen))) {
2222 ibdev_dbg(&iwdev->ibdev,
2223 "VERBS: copy to user data\n");
2224 err_code = -EPROTO;
2225 goto cq_destroy;
2226 }
2227 }
2228 rf->cq_table[cq_num] = iwcq;
2229 init_completion(&iwcq->free_cq);
2230
2231 return 0;
2232 cq_destroy:
2233 irdma_cq_wq_destroy(rf, cq);
2234 cq_free_rsrc:
2235 irdma_cq_free_rsrc(rf, iwcq);
2236
2237 return err_code;
2238 }
2239
2240 /**
2241 * irdma_get_mr_access - get hw MR access permissions from IB access flags
2242 * @access: IB access flags
2243 */
2244 static inline u16 irdma_get_mr_access(int access)
2245 {
2246 u16 hw_access = 0;
2247
2248 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2249 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2250 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2251 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2252 hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2253 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2254 hw_access |= (access & IB_ACCESS_MW_BIND) ?
2255 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2256 hw_access |= (access & IB_ZERO_BASED) ?
2257 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
2258 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
2259
2260 return hw_access;
2261 }
2262
2263 /**
2264 * irdma_free_stag - free stag resource
2265 * @iwdev: irdma device
2266 * @stag: stag to free
2267 */
2268 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2269 {
2270 u32 stag_idx;
2271
2272 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2273 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2274 }
2275
2276 /**
2277 * irdma_create_stag - create random stag
2278 * @iwdev: irdma device
2279 */
2280 static u32 irdma_create_stag(struct irdma_device *iwdev)
2281 {
2282 u32 stag = 0;
2283 u32 stag_index = 0;
2284 u32 next_stag_index;
2285 u32 driver_key;
2286 u32 random;
2287 u8 consumer_key;
2288 int ret;
2289
2290 get_random_bytes(&random, sizeof(random));
2291 consumer_key = (u8)random;
2292
2293 driver_key = random & ~iwdev->rf->mr_stagmask;
2294 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2295 next_stag_index %= iwdev->rf->max_mr;
2296
2297 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2298 iwdev->rf->max_mr, &stag_index,
2299 &next_stag_index);
2300 if (ret)
2301 return stag;
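	/* Compose the stag: resource index in the upper bits, random driver
	 * key bits outside the stag mask, and an 8-bit consumer key.
	 */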
2302 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2303 stag |= driver_key;
2304 stag += (u32)consumer_key;
2305
2306 return stag;
2307 }
2308
2309 /**
2310 * irdma_next_pbl_addr - Get next pbl address
2311 * @pbl: pointer to a pble
2312 * @pinfo: info pointer
2313 * @idx: index
2314 */
2315 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2316 u32 *idx)
2317 {
2318 *idx += 1;
2319 if (!(*pinfo) || *idx != (*pinfo)->cnt)
2320 return ++pbl;
2321 *idx = 0;
2322 (*pinfo)++;
2323
2324 return (*pinfo)->addr;
2325 }
2326
2327 /**
2328 * irdma_copy_user_pgaddrs - copy user page addresses to pbles or local memory
2329 * @iwmr: iwmr for IB's user page addresses
2330 * @pbl: pbl pointer to save level 1 or level 0 pble addresses
2331 * @level: indicates level 0, 1 or 2
2332 */
2333 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2334 enum irdma_pble_level level)
2335 {
2336 struct ib_umem *region = iwmr->region;
2337 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2338 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2339 struct irdma_pble_info *pinfo;
2340 struct ib_block_iter biter;
2341 u32 idx = 0;
2342 u32 pbl_cnt = 0;
2343
2344 pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2345
2346 if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2347 iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2348
2349 rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2350 *pbl = rdma_block_iter_dma_address(&biter);
2351 if (++pbl_cnt == palloc->total_cnt)
2352 break;
2353 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
2354 }
2355 }
2356
2357 /**
2358 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
2359 * @arr: lvl1 pbl array
2360 * @npages: page count
2361 * @pg_size: page size
2362 *
2363 */
2364 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2365 {
2366 u32 pg_idx;
2367
2368 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2369 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
2370 return false;
2371 }
2372
2373 return true;
2374 }
2375
2376 /**
2377 * irdma_check_mr_contiguous - check if MR is physically contiguous
2378 * @palloc: pbl allocation struct
2379 * @pg_size: page size
2380 */
2381 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2382 u32 pg_size)
2383 {
2384 struct irdma_pble_level2 *lvl2 = &palloc->level2;
2385 struct irdma_pble_info *leaf = lvl2->leaf;
2386 u64 *arr = NULL;
2387 u64 *start_addr = NULL;
2388 int i;
2389 bool ret;
2390
2391 if (palloc->level == PBLE_LEVEL_1) {
2392 arr = palloc->level1.addr;
2393 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2394 pg_size);
2395 return ret;
2396 }
2397
2398 start_addr = leaf->addr;
2399
2400 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2401 arr = leaf->addr;
2402 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2403 return false;
2404 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2405 if (!ret)
2406 return false;
2407 }
2408
2409 return true;
2410 }
2411
2412 /**
2413 * irdma_setup_pbles - copy user page addresses to pbles
2414 * @rf: RDMA PCI function
2415 * @iwmr: mr pointer for this memory registration
2416 * @lvl: requested pble levels
2417 */
2418 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2419 u8 lvl)
2420 {
2421 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2422 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2423 struct irdma_pble_info *pinfo;
2424 u64 *pbl;
2425 int status;
2426 enum irdma_pble_level level = PBLE_LEVEL_1;
2427
2428 if (lvl) {
2429 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2430 lvl);
2431 if (status)
2432 return status;
2433
2434 iwpbl->pbl_allocated = true;
2435 level = palloc->level;
2436 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2437 palloc->level2.leaf;
2438 pbl = pinfo->addr;
2439 } else {
2440 pbl = iwmr->pgaddrmem;
2441 }
2442
2443 irdma_copy_user_pgaddrs(iwmr, pbl, level);
2444
2445 if (lvl)
2446 iwmr->pgaddrmem[0] = *pbl;
2447
2448 return 0;
2449 }
2450
2451 /**
2452 * irdma_handle_q_mem - handle memory for qp and cq
2453 * @iwdev: irdma device
2454 * @req: information for q memory management
2455 * @iwpbl: pble struct
2456 * @lvl: pble level mask
2457 */
2458 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2459 struct irdma_mem_reg_req *req,
2460 struct irdma_pbl *iwpbl, u8 lvl)
2461 {
2462 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2463 struct irdma_mr *iwmr = iwpbl->iwmr;
2464 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2465 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2466 struct irdma_hmc_pble *hmc_p;
2467 u64 *arr = iwmr->pgaddrmem;
2468 u32 pg_size, total;
2469 int err = 0;
2470 bool ret = true;
2471
2472 pg_size = iwmr->page_size;
2473 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2474 if (err)
2475 return err;
2476
2477 if (lvl)
2478 arr = palloc->level1.addr;
2479
2480 switch (iwmr->type) {
2481 case IRDMA_MEMREG_TYPE_QP:
2482 total = req->sq_pages + req->rq_pages;
2483 hmc_p = &qpmr->sq_pbl;
2484 qpmr->shadow = (dma_addr_t)arr[total];
2485
2486 if (lvl) {
2487 ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2488 pg_size);
2489 if (ret)
2490 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2491 req->rq_pages,
2492 pg_size);
2493 }
2494
2495 if (!ret) {
2496 hmc_p->idx = palloc->level1.idx;
2497 hmc_p = &qpmr->rq_pbl;
2498 hmc_p->idx = palloc->level1.idx + req->sq_pages;
2499 } else {
2500 hmc_p->addr = arr[0];
2501 hmc_p = &qpmr->rq_pbl;
2502 hmc_p->addr = arr[req->sq_pages];
2503 }
2504 break;
2505 case IRDMA_MEMREG_TYPE_CQ:
2506 hmc_p = &cqmr->cq_pbl;
2507
2508 if (!cqmr->split)
2509 cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2510
2511 if (lvl)
2512 ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2513 pg_size);
2514
2515 if (!ret)
2516 hmc_p->idx = palloc->level1.idx;
2517 else
2518 hmc_p->addr = arr[0];
2519 break;
2520 default:
2521 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2522 err = -EINVAL;
2523 }
2524
2525 if (lvl && ret) {
2526 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2527 iwpbl->pbl_allocated = false;
2528 }
2529
2530 return err;
2531 }
2532
2533 /**
2534 * irdma_hw_alloc_mw - create the hw memory window
2535 * @iwdev: irdma device
2536 * @iwmr: pointer to memory window info
2537 */
2538 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2539 {
2540 struct irdma_mw_alloc_info *info;
2541 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2542 struct irdma_cqp_request *cqp_request;
2543 struct cqp_cmds_info *cqp_info;
2544 int status;
2545
2546 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2547 if (!cqp_request)
2548 return -ENOMEM;
2549
2550 cqp_info = &cqp_request->info;
2551 info = &cqp_info->in.u.mw_alloc.info;
2552 memset(info, 0, sizeof(*info));
2553 if (iwmr->ibmw.type == IB_MW_TYPE_1)
2554 info->mw_wide = true;
2555
2556 info->page_size = PAGE_SIZE;
2557 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2558 info->pd_id = iwpd->sc_pd.pd_id;
2559 info->remote_access = true;
2560 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2561 cqp_info->post_sq = 1;
2562 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2563 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2564 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2565 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2566
2567 return status;
2568 }
2569
2570 /**
2571 * irdma_alloc_mw - Allocate memory window
2572 * @ibmw: Memory Window
2573 * @udata: user data pointer
2574 */
2575 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2576 {
2577 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2578 struct irdma_mr *iwmr = to_iwmw(ibmw);
2579 int err_code;
2580 u32 stag;
2581
2582 stag = irdma_create_stag(iwdev);
2583 if (!stag)
2584 return -ENOMEM;
2585
2586 iwmr->stag = stag;
2587 ibmw->rkey = stag;
2588
2589 err_code = irdma_hw_alloc_mw(iwdev, iwmr);
2590 if (err_code) {
2591 irdma_free_stag(iwdev, stag);
2592 return err_code;
2593 }
2594
2595 return 0;
2596 }
2597
2598 /**
2599 * irdma_dealloc_mw - Dealloc memory window
2600 * @ibmw: memory window structure.
2601 */
2602 static int irdma_dealloc_mw(struct ib_mw *ibmw)
2603 {
2604 struct ib_pd *ibpd = ibmw->pd;
2605 struct irdma_pd *iwpd = to_iwpd(ibpd);
2606 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2607 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2608 struct irdma_cqp_request *cqp_request;
2609 struct cqp_cmds_info *cqp_info;
2610 struct irdma_dealloc_stag_info *info;
2611
2612 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2613 if (!cqp_request)
2614 return -ENOMEM;
2615
2616 cqp_info = &cqp_request->info;
2617 info = &cqp_info->in.u.dealloc_stag.info;
2618 memset(info, 0, sizeof(*info));
2619 info->pd_id = iwpd->sc_pd.pd_id;
2620 info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
2621 info->mr = false;
2622 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2623 cqp_info->post_sq = 1;
2624 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2625 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2626 irdma_handle_cqp_op(iwdev->rf, cqp_request);
2627 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2628 irdma_free_stag(iwdev, iwmr->stag);
2629
2630 return 0;
2631 }
2632
2633 /**
2634 * irdma_hw_alloc_stag - cqp command to allocate stag
2635 * @iwdev: irdma device
2636 * @iwmr: irdma mr pointer
2637 */
2638 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
2639 struct irdma_mr *iwmr)
2640 {
2641 struct irdma_allocate_stag_info *info;
2642 struct ib_pd *pd = iwmr->ibmr.pd;
2643 struct irdma_pd *iwpd = to_iwpd(pd);
2644 int status;
2645 struct irdma_cqp_request *cqp_request;
2646 struct cqp_cmds_info *cqp_info;
2647
2648 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2649 if (!cqp_request)
2650 return -ENOMEM;
2651
2652 cqp_info = &cqp_request->info;
2653 info = &cqp_info->in.u.alloc_stag.info;
2654 memset(info, 0, sizeof(*info));
2655 info->page_size = PAGE_SIZE;
2656 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2657 info->pd_id = iwpd->sc_pd.pd_id;
2658 info->total_len = iwmr->len;
2659 info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
2660 info->remote_access = true;
2661 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2662 cqp_info->post_sq = 1;
2663 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2664 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2665 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2666 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2667 if (status)
2668 return status;
2669
2670 iwmr->is_hwreg = 1;
2671 return 0;
2672 }
2673
2674 /**
2675 * irdma_alloc_mr - register stag for fast memory registration
2676 * @pd: ibpd pointer
2677 * @mr_type: memory type for stag registration
2678 * @max_num_sg: max number of pages
2679 */
2680 static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2681 u32 max_num_sg)
2682 {
2683 struct irdma_device *iwdev = to_iwdev(pd->device);
2684 struct irdma_pble_alloc *palloc;
2685 struct irdma_pbl *iwpbl;
2686 struct irdma_mr *iwmr;
2687 u32 stag;
2688 int err_code;
2689
2690 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2691 if (!iwmr)
2692 return ERR_PTR(-ENOMEM);
2693
2694 stag = irdma_create_stag(iwdev);
2695 if (!stag) {
2696 err_code = -ENOMEM;
2697 goto err;
2698 }
2699
2700 iwmr->stag = stag;
2701 iwmr->ibmr.rkey = stag;
2702 iwmr->ibmr.lkey = stag;
2703 iwmr->ibmr.pd = pd;
2704 iwmr->ibmr.device = pd->device;
2705 iwpbl = &iwmr->iwpbl;
2706 iwpbl->iwmr = iwmr;
2707 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2708 palloc = &iwpbl->pble_alloc;
2709 iwmr->page_cnt = max_num_sg;
2710 /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
2711 iwmr->len = max_num_sg * PAGE_SIZE;
2712 err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
2713 false);
2714 if (err_code)
2715 goto err_get_pble;
2716
2717 err_code = irdma_hw_alloc_stag(iwdev, iwmr);
2718 if (err_code)
2719 goto err_alloc_stag;
2720
2721 iwpbl->pbl_allocated = true;
2722
2723 return &iwmr->ibmr;
2724 err_alloc_stag:
2725 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2726 err_get_pble:
2727 irdma_free_stag(iwdev, stag);
2728 err:
2729 kfree(iwmr);
2730
2731 return ERR_PTR(err_code);
2732 }
2733
2734 /**
2735 * irdma_set_page - populate pbl list for fmr
2736 * @ibmr: ib mem to access iwarp mr pointer
2737 * @addr: page dma address for pbl list
2738 */
2739 static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
2740 {
2741 struct irdma_mr *iwmr = to_iwmr(ibmr);
2742 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2743 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2744 u64 *pbl;
2745
2746 if (unlikely(iwmr->npages == iwmr->page_cnt))
2747 return -ENOMEM;
2748
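	/* For a level 2 pble tree, locate the leaf holding this page
	 * (PBLE_PER_PAGE entries per leaf) and index within that leaf.
	 */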
2749 if (palloc->level == PBLE_LEVEL_2) {
2750 struct irdma_pble_info *palloc_info =
2751 palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
2752
2753 palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
2754 } else {
2755 pbl = palloc->level1.addr;
2756 pbl[iwmr->npages] = addr;
2757 }
2758 iwmr->npages++;
2759
2760 return 0;
2761 }
2762
2763 /**
2764 * irdma_map_mr_sg - map sg list for fmr
2765 * @ibmr: ib mem to access iwarp mr pointer
2766 * @sg: scatter gather list
2767 * @sg_nents: number of sg entries
2768 * @sg_offset: offset into the first sg element
2769 */
2770 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2771 int sg_nents, unsigned int *sg_offset)
2772 {
2773 struct irdma_mr *iwmr = to_iwmr(ibmr);
2774
2775 iwmr->npages = 0;
2776
2777 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
2778 }
2779
2780 /**
2781 * irdma_hwreg_mr - send cqp command for memory registration
2782 * @iwdev: irdma device
2783 * @iwmr: irdma mr pointer
2784 * @access: access for MR
2785 */
2786 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2787 u16 access)
2788 {
2789 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2790 struct irdma_reg_ns_stag_info *stag_info;
2791 struct ib_pd *pd = iwmr->ibmr.pd;
2792 struct irdma_pd *iwpd = to_iwpd(pd);
2793 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2794 struct irdma_cqp_request *cqp_request;
2795 struct cqp_cmds_info *cqp_info;
2796 int ret;
2797
2798 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2799 if (!cqp_request)
2800 return -ENOMEM;
2801
2802 cqp_info = &cqp_request->info;
2803 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2804 memset(stag_info, 0, sizeof(*stag_info));
2805 stag_info->va = iwpbl->user_base;
2806 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2807 stag_info->stag_key = (u8)iwmr->stag;
2808 stag_info->total_len = iwmr->len;
2809 stag_info->access_rights = irdma_get_mr_access(access);
2810 stag_info->pd_id = iwpd->sc_pd.pd_id;
2811 stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
2812 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2813 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2814 else
2815 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2816 stag_info->page_size = iwmr->page_size;
2817
2818 if (iwpbl->pbl_allocated) {
2819 if (palloc->level == PBLE_LEVEL_1) {
2820 stag_info->first_pm_pbl_index = palloc->level1.idx;
2821 stag_info->chunk_size = 1;
2822 } else {
2823 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2824 stag_info->chunk_size = 3;
2825 }
2826 } else {
2827 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2828 }
2829
2830 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2831 cqp_info->post_sq = 1;
2832 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2833 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2834 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2835 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2836
2837 if (!ret)
2838 iwmr->is_hwreg = 1;
2839
2840 return ret;
2841 }
2842
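/**
 * irdma_reg_user_mr_type_mem - register a MEM type user memory region
 * @iwmr: irdma mr pointer
 * @access: IB access flags for the MR
 * @create_stag: true if a new stag should be allocated for this MR
 */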
2843 static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
2844 bool create_stag)
2845 {
2846 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2847 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2848 u32 stag = 0;
2849 u8 lvl;
2850 int err;
2851
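	/* A single-page MR uses the in-struct page address array (level 0);
	 * otherwise request a level 1 or level 2 pble allocation.
	 */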
2852 lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
2853
2854 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
2855 if (err)
2856 return err;
2857
2858 if (lvl) {
2859 err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
2860 iwmr->page_size);
2861 if (err) {
2862 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2863 iwpbl->pbl_allocated = false;
2864 }
2865 }
2866
2867 if (create_stag) {
2868 stag = irdma_create_stag(iwdev);
2869 if (!stag) {
2870 err = -ENOMEM;
2871 goto free_pble;
2872 }
2873
2874 iwmr->stag = stag;
2875 iwmr->ibmr.rkey = stag;
2876 iwmr->ibmr.lkey = stag;
2877 }
2878
2879 err = irdma_hwreg_mr(iwdev, iwmr, access);
2880 if (err)
2881 goto err_hwreg;
2882
2883 return 0;
2884
2885 err_hwreg:
2886 if (stag)
2887 irdma_free_stag(iwdev, stag);
2888
2889 free_pble:
2890 if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2891 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
2892
2893 return err;
2894 }
2895
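/**
 * irdma_alloc_iwmr - allocate and initialize an irdma_mr for a umem region
 * @region: user memory region backing the MR
 * @pd: protection domain
 * @virt: virtual address of the MR
 * @reg_type: registration type (MEM, QP or CQ)
 */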
2896 static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
2897 struct ib_pd *pd, u64 virt,
2898 enum irdma_memreg_type reg_type)
2899 {
2900 struct irdma_device *iwdev = to_iwdev(pd->device);
2901 struct irdma_pbl *iwpbl;
2902 struct irdma_mr *iwmr;
2903 unsigned long pgsz_bitmap;
2904
2905 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2906 if (!iwmr)
2907 return ERR_PTR(-ENOMEM);
2908
2909 iwpbl = &iwmr->iwpbl;
2910 iwpbl->iwmr = iwmr;
2911 iwmr->region = region;
2912 iwmr->ibmr.pd = pd;
2913 iwmr->ibmr.device = pd->device;
2914 iwmr->ibmr.iova = virt;
2915 iwmr->type = reg_type;
2916
2917 pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
2918 iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
2919
2920 iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
2921 if (unlikely(!iwmr->page_size)) {
2922 kfree(iwmr);
2923 return ERR_PTR(-EOPNOTSUPP);
2924 }
2925
2926 iwmr->len = region->length;
2927 iwpbl->user_base = virt;
2928 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
2929
2930 return iwmr;
2931 }
2932
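/**
 * irdma_free_iwmr - free an irdma_mr allocated by irdma_alloc_iwmr
 * @iwmr: irdma mr pointer
 */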
2933 static void irdma_free_iwmr(struct irdma_mr *iwmr)
2934 {
2935 kfree(iwmr);
2936 }
2937
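/**
 * irdma_reg_user_mr_type_qp - register a QP type user memory region
 * @req: memory registration request from user space
 * @udata: user data
 * @iwmr: irdma mr pointer
 */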
2938 static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
2939 struct ib_udata *udata,
2940 struct irdma_mr *iwmr)
2941 {
2942 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2943 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2944 struct irdma_ucontext *ucontext = NULL;
2945 unsigned long flags;
2946 u32 total;
2947 int err;
2948 u8 lvl;
2949
2950 /* iWarp: Catch page not starting on OS page boundary */
2951 if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
2952 ib_umem_offset(iwmr->region))
2953 return -EINVAL;
2954
2955 total = req.sq_pages + req.rq_pages + 1;
2956 if (total > iwmr->page_cnt)
2957 return -EINVAL;
2958
2959 total = req.sq_pages + req.rq_pages;
2960 lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
2961 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2962 if (err)
2963 return err;
2964
2965 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2966 ibucontext);
2967 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2968 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2969 iwpbl->on_list = true;
2970 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2971
2972 return 0;
2973 }
2974
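/**
 * irdma_reg_user_mr_type_cq - register a CQ type user memory region
 * @req: memory registration request from user space
 * @udata: user data
 * @iwmr: irdma mr pointer
 */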
2975 static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
2976 struct ib_udata *udata,
2977 struct irdma_mr *iwmr)
2978 {
2979 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
2980 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2981 struct irdma_ucontext *ucontext = NULL;
2982 u8 shadow_pgcnt = 1;
2983 unsigned long flags;
2984 u32 total;
2985 int err;
2986 u8 lvl;
2987
2988 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2989 shadow_pgcnt = 0;
2990 total = req.cq_pages + shadow_pgcnt;
2991 if (total > iwmr->page_cnt)
2992 return -EINVAL;
2993
2994 lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
2995 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
2996 if (err)
2997 return err;
2998
2999 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
3000 ibucontext);
3001 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3002 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
3003 iwpbl->on_list = true;
3004 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3005
3006 return 0;
3007 }
3008
3009 /**
3010 * irdma_reg_user_mr - Register a user memory region
3011 * @pd: ptr of pd
3012 * @start: virtual start address
3013 * @len: length of mr
3014 * @virt: virtual address
3015 * @access: access of mr
3016 * @udata: user data
3017 */
3018 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
3019 u64 virt, int access,
3020 struct ib_udata *udata)
3021 {
3022 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
3023 struct irdma_device *iwdev = to_iwdev(pd->device);
3024 struct irdma_mem_reg_req req = {};
3025 struct ib_umem *region = NULL;
3026 struct irdma_mr *iwmr = NULL;
3027 int err;
3028
3029 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3030 return ERR_PTR(-EINVAL);
3031
3032 if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
3033 return ERR_PTR(-EINVAL);
3034
3035 region = ib_umem_get(pd->device, start, len, access);
3036
3037 if (IS_ERR(region)) {
3038 ibdev_dbg(&iwdev->ibdev,
3039 "VERBS: Failed to create ib_umem region\n");
3040 return (struct ib_mr *)region;
3041 }
3042
3043 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
3044 ib_umem_release(region);
3045 return ERR_PTR(-EFAULT);
3046 }
3047
3048 iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
3049 if (IS_ERR(iwmr)) {
3050 ib_umem_release(region);
3051 return (struct ib_mr *)iwmr;
3052 }
3053
3054 switch (req.reg_type) {
3055 case IRDMA_MEMREG_TYPE_QP:
3056 err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
3057 if (err)
3058 goto error;
3059
3060 break;
3061 case IRDMA_MEMREG_TYPE_CQ:
3062 err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
3063 if (err)
3064 goto error;
3065 break;
3066 case IRDMA_MEMREG_TYPE_MEM:
3067 err = irdma_reg_user_mr_type_mem(iwmr, access, true);
3068 if (err)
3069 goto error;
3070
3071 break;
3072 default:
3073 err = -EINVAL;
3074 goto error;
3075 }
3076
3077 return &iwmr->ibmr;
3078 error:
3079 ib_umem_release(region);
3080 irdma_free_iwmr(iwmr);
3081
3082 return ERR_PTR(err);
3083 }
3084
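/**
 * irdma_reg_user_mr_dmabuf - register a dmabuf-backed user memory region
 * @pd: ptr of pd
 * @start: virtual start address
 * @len: length of mr
 * @virt: virtual address
 * @fd: file descriptor of the dmabuf
 * @access: access of mr
 * @attrs: uverbs attribute bundle
 */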
3085 static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
3086 u64 len, u64 virt,
3087 int fd, int access,
3088 struct uverbs_attr_bundle *attrs)
3089 {
3090 struct irdma_device *iwdev = to_iwdev(pd->device);
3091 struct ib_umem_dmabuf *umem_dmabuf;
3092 struct irdma_mr *iwmr;
3093 int err;
3094
3095 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3096 return ERR_PTR(-EINVAL);
3097
3098 umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
3099 if (IS_ERR(umem_dmabuf)) {
3100 err = PTR_ERR(umem_dmabuf);
3101 ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
3102 return ERR_PTR(err);
3103 }
3104
3105 iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
3106 if (IS_ERR(iwmr)) {
3107 err = PTR_ERR(iwmr);
3108 goto err_release;
3109 }
3110
3111 err = irdma_reg_user_mr_type_mem(iwmr, access, true);
3112 if (err)
3113 goto err_iwmr;
3114
3115 return &iwmr->ibmr;
3116
3117 err_iwmr:
3118 irdma_free_iwmr(iwmr);
3119
3120 err_release:
3121 ib_umem_release(&umem_dmabuf->umem);
3122
3123 return ERR_PTR(err);
3124 }
3125
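/**
 * irdma_hwdereg_mr - send cqp command to deregister the MR's stag in HW
 * @ib_mr: ib mr to deregister
 */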
3126 static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
3127 {
3128 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3129 struct irdma_mr *iwmr = to_iwmr(ib_mr);
3130 struct irdma_pd *iwpd = to_iwpd(ib_mr->pd);
3131 struct irdma_dealloc_stag_info *info;
3132 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3133 struct irdma_cqp_request *cqp_request;
3134 struct cqp_cmds_info *cqp_info;
3135 int status;
3136
3137 /* Skip HW MR de-register when it is already de-registered
3138 	 * during an MR re-register and the re-registration fails
3139 */
3140 if (!iwmr->is_hwreg)
3141 return 0;
3142
3143 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3144 if (!cqp_request)
3145 return -ENOMEM;
3146
3147 cqp_info = &cqp_request->info;
3148 info = &cqp_info->in.u.dealloc_stag.info;
3149 memset(info, 0, sizeof(*info));
3150 info->pd_id = iwpd->sc_pd.pd_id;
3151 info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3152 info->mr = true;
3153 if (iwpbl->pbl_allocated)
3154 info->dealloc_pbl = true;
3155
3156 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3157 cqp_info->post_sq = 1;
3158 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3159 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3160 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3161 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3162 if (status)
3163 return status;
3164
3165 iwmr->is_hwreg = 0;
3166 return 0;
3167 }
3168
3169 /*
3170 * irdma_rereg_mr_trans - Re-register a user MR for a translation change.
3171 * @iwmr: ptr of iwmr
3172 * @start: virtual start address
3173 * @len: length of mr
3174 * @virt: virtual address
3175 *
3176 * Re-register a user memory region when a translation change is requested.
3177 * Re-register a new region while reusing the stag from the original registration.
3178 */
3179 static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
3180 u64 virt)
3181 {
3182 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
3183 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3184 struct ib_pd *pd = iwmr->ibmr.pd;
3185 struct ib_umem *region;
3186 int err;
3187
3188 region = ib_umem_get(pd->device, start, len, iwmr->access);
3189 if (IS_ERR(region))
3190 return PTR_ERR(region);
3191
3192 iwmr->region = region;
3193 iwmr->ibmr.iova = virt;
3194 iwmr->ibmr.pd = pd;
3195 iwmr->page_size = ib_umem_find_best_pgsz(region,
3196 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
3197 virt);
3198 if (unlikely(!iwmr->page_size)) {
3199 err = -EOPNOTSUPP;
3200 goto err;
3201 }
3202
3203 iwmr->len = region->length;
3204 iwpbl->user_base = virt;
3205 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
3206
3207 err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false);
3208 if (err)
3209 goto err;
3210
3211 return 0;
3212
3213 err:
3214 ib_umem_release(region);
3215 return err;
3216 }
3217
3218 /*
3219 * irdma_rereg_user_mr - Re-register a user memory region (MR)
3220 * @ibmr: ib mem to access iwarp mr pointer
3221 * @flags: bit mask indicating which MR attributes are being modified
3222 * @start: virtual start address
3223 * @len: length of mr
3224 * @virt: virtual address
3225 * @new_access: bit mask of access flags
3226 * @new_pd: ptr of pd
3227 * @udata: user data
3228 *
3229 * Return:
3230 * NULL - Success, existing MR updated
3231 * ERR_PTR - error occurred
3232 */
3233 static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags,
3234 u64 start, u64 len, u64 virt,
3235 int new_access, struct ib_pd *new_pd,
3236 struct ib_udata *udata)
3237 {
3238 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3239 struct irdma_mr *iwmr = to_iwmr(ib_mr);
3240 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3241 int ret;
3242
3243 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
3244 return ERR_PTR(-EINVAL);
3245
3246 if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
3247 return ERR_PTR(-EOPNOTSUPP);
3248
3249 ret = irdma_hwdereg_mr(ib_mr);
3250 if (ret)
3251 return ERR_PTR(ret);
3252
3253 if (flags & IB_MR_REREG_ACCESS)
3254 iwmr->access = new_access;
3255
3256 if (flags & IB_MR_REREG_PD) {
3257 iwmr->ibmr.pd = new_pd;
3258 iwmr->ibmr.device = new_pd->device;
3259 }
3260
3261 if (flags & IB_MR_REREG_TRANS) {
3262 if (iwpbl->pbl_allocated) {
3263 irdma_free_pble(iwdev->rf->pble_rsrc,
3264 &iwpbl->pble_alloc);
3265 iwpbl->pbl_allocated = false;
3266 }
3267 if (iwmr->region) {
3268 ib_umem_release(iwmr->region);
3269 iwmr->region = NULL;
3270 }
3271
3272 ret = irdma_rereg_mr_trans(iwmr, start, len, virt);
3273 } else
3274 ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
3275 if (ret)
3276 return ERR_PTR(ret);
3277
3278 return NULL;
3279 }
3280
3281 /**
3282 * irdma_reg_phys_mr - register kernel physical memory
3283 * @pd: ibpd pointer
3284 * @addr: physical address of memory to register
3285 * @size: size of memory to register
3286 * @access: Access rights
3287 * @iova_start: start of virtual address for physical buffers
3288 */
3289 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
3290 u64 *iova_start)
3291 {
3292 struct irdma_device *iwdev = to_iwdev(pd->device);
3293 struct irdma_pbl *iwpbl;
3294 struct irdma_mr *iwmr;
3295 u32 stag;
3296 int ret;
3297
3298 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
3299 if (!iwmr)
3300 return ERR_PTR(-ENOMEM);
3301
3302 iwmr->ibmr.pd = pd;
3303 iwmr->ibmr.device = pd->device;
3304 iwpbl = &iwmr->iwpbl;
3305 iwpbl->iwmr = iwmr;
3306 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
3307 iwpbl->user_base = *iova_start;
3308 stag = irdma_create_stag(iwdev);
3309 if (!stag) {
3310 ret = -ENOMEM;
3311 goto err;
3312 }
3313
3314 iwmr->stag = stag;
3315 iwmr->ibmr.iova = *iova_start;
3316 iwmr->ibmr.rkey = stag;
3317 iwmr->ibmr.lkey = stag;
3318 iwmr->page_cnt = 1;
3319 iwmr->pgaddrmem[0] = addr;
3320 iwmr->len = size;
3321 iwmr->page_size = SZ_4K;
3322 ret = irdma_hwreg_mr(iwdev, iwmr, access);
3323 if (ret) {
3324 irdma_free_stag(iwdev, stag);
3325 goto err;
3326 }
3327
3328 return &iwmr->ibmr;
3329
3330 err:
3331 kfree(iwmr);
3332
3333 return ERR_PTR(ret);
3334 }
3335
3336 /**
3337 * irdma_get_dma_mr - register physical mem
3338 * @pd: ptr of pd
3339 * @acc: access for memory
3340 */
3341 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
3342 {
3343 u64 kva = 0;
3344
3345 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
3346 }
3347
3348 /**
3349 * irdma_del_memlist - delete pbl list entries for CQ/QP
3350 * @iwmr: iwmr for IB's user page addresses
3351 * @ucontext: ptr to user context
3352 */
3353 static void irdma_del_memlist(struct irdma_mr *iwmr,
3354 struct irdma_ucontext *ucontext)
3355 {
3356 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3357 unsigned long flags;
3358
3359 switch (iwmr->type) {
3360 case IRDMA_MEMREG_TYPE_CQ:
3361 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
3362 if (iwpbl->on_list) {
3363 iwpbl->on_list = false;
3364 list_del(&iwpbl->list);
3365 }
3366 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
3367 break;
3368 case IRDMA_MEMREG_TYPE_QP:
3369 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
3370 if (iwpbl->on_list) {
3371 iwpbl->on_list = false;
3372 list_del(&iwpbl->list);
3373 }
3374 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
3375 break;
3376 default:
3377 break;
3378 }
3379 }
3380
3381 /**
3382 * irdma_dereg_mr - deregister mr
3383 * @ib_mr: mr ptr for dereg
3384 * @udata: user data
3385 */
3386 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3387 {
3388 struct irdma_mr *iwmr = to_iwmr(ib_mr);
3389 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3390 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3391 int ret;
3392
3393 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3394 if (iwmr->region) {
3395 struct irdma_ucontext *ucontext;
3396
3397 ucontext = rdma_udata_to_drv_context(udata,
3398 struct irdma_ucontext,
3399 ibucontext);
3400 irdma_del_memlist(iwmr, ucontext);
3401 }
3402 goto done;
3403 }
3404
3405 ret = irdma_hwdereg_mr(ib_mr);
3406 if (ret)
3407 return ret;
3408
3409 irdma_free_stag(iwdev, iwmr->stag);
3410 done:
3411 if (iwpbl->pbl_allocated)
3412 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
3413
3414 if (iwmr->region)
3415 ib_umem_release(iwmr->region);
3416
3417 kfree(iwmr);
3418
3419 return 0;
3420 }
3421
3422 /**
3423 * irdma_post_send - kernel application wr
3424 * @ibqp: qp ptr for wr
3425 * @ib_wr: work request ptr
3426 * @bad_wr: return of bad wr if err
3427 */
3428 static int irdma_post_send(struct ib_qp *ibqp,
3429 const struct ib_send_wr *ib_wr,
3430 const struct ib_send_wr **bad_wr)
3431 {
3432 struct irdma_qp *iwqp;
3433 struct irdma_qp_uk *ukqp;
3434 struct irdma_sc_dev *dev;
3435 struct irdma_post_sq_info info;
3436 int err = 0;
3437 unsigned long flags;
3438 bool inv_stag;
3439 struct irdma_ah *ah;
3440
3441 iwqp = to_iwqp(ibqp);
3442 ukqp = &iwqp->sc_qp.qp_uk;
3443 dev = &iwqp->iwdev->rf->sc_dev;
3444
3445 spin_lock_irqsave(&iwqp->lock, flags);
3446 while (ib_wr) {
3447 memset(&info, 0, sizeof(info));
3448 inv_stag = false;
3449 info.wr_id = (ib_wr->wr_id);
3450 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3451 info.signaled = true;
3452 if (ib_wr->send_flags & IB_SEND_FENCE)
3453 info.read_fence = true;
3454 switch (ib_wr->opcode) {
3455 case IB_WR_SEND_WITH_IMM:
3456 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
3457 info.imm_data_valid = true;
3458 info.imm_data = ntohl(ib_wr->ex.imm_data);
3459 } else {
3460 err = -EINVAL;
3461 break;
3462 }
3463 fallthrough;
3464 case IB_WR_SEND:
3465 case IB_WR_SEND_WITH_INV:
3466 if (ib_wr->opcode == IB_WR_SEND ||
3467 ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
3468 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3469 info.op_type = IRDMA_OP_TYPE_SEND_SOL;
3470 else
3471 info.op_type = IRDMA_OP_TYPE_SEND;
3472 } else {
3473 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3474 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
3475 else
3476 info.op_type = IRDMA_OP_TYPE_SEND_INV;
3477 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
3478 }
3479
3480 info.op.send.num_sges = ib_wr->num_sge;
3481 info.op.send.sg_list = ib_wr->sg_list;
3482 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3483 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3484 ah = to_iwah(ud_wr(ib_wr)->ah);
3485 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
3486 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
3487 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3488 }
3489
3490 if (ib_wr->send_flags & IB_SEND_INLINE)
3491 err = irdma_uk_inline_send(ukqp, &info, false);
3492 else
3493 err = irdma_uk_send(ukqp, &info, false);
3494 break;
3495 case IB_WR_RDMA_WRITE_WITH_IMM:
3496 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
3497 info.imm_data_valid = true;
3498 info.imm_data = ntohl(ib_wr->ex.imm_data);
3499 } else {
3500 err = -EINVAL;
3501 break;
3502 }
3503 fallthrough;
3504 case IB_WR_RDMA_WRITE:
3505 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3506 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
3507 else
3508 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
3509
3510 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
3511 info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
3512 info.op.rdma_write.rem_addr.addr =
3513 rdma_wr(ib_wr)->remote_addr;
3514 info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3515 if (ib_wr->send_flags & IB_SEND_INLINE)
3516 err = irdma_uk_inline_rdma_write(ukqp, &info, false);
3517 else
3518 err = irdma_uk_rdma_write(ukqp, &info, false);
3519 break;
3520 case IB_WR_RDMA_READ_WITH_INV:
3521 inv_stag = true;
3522 fallthrough;
3523 case IB_WR_RDMA_READ:
3524 if (ib_wr->num_sge >
3525 dev->hw_attrs.uk_attrs.max_hw_read_sges) {
3526 err = -EINVAL;
3527 break;
3528 }
3529 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
3530 info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
3531 info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3532 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
3533 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
3534 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
3535 break;
3536 case IB_WR_LOCAL_INV:
3537 info.op_type = IRDMA_OP_TYPE_INV_STAG;
3538 info.local_fence = info.read_fence;
3539 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
3540 err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
3541 break;
3542 case IB_WR_REG_MR: {
3543 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
3544 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
3545 struct irdma_fast_reg_stag_info stag_info = {};
3546
3547 stag_info.signaled = info.signaled;
3548 stag_info.read_fence = info.read_fence;
3549 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
3550 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
3551 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
3552 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
3553 stag_info.wr_id = ib_wr->wr_id;
3554 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3555 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
3556 stag_info.total_len = iwmr->ibmr.length;
3557 stag_info.reg_addr_pa = *palloc->level1.addr;
3558 stag_info.first_pm_pbl_index = palloc->level1.idx;
3559 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
3560 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
3561 stag_info.chunk_size = 1;
3562 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
3563 true);
3564 break;
3565 }
3566 default:
3567 err = -EINVAL;
3568 ibdev_dbg(&iwqp->iwdev->ibdev,
3569 "VERBS: upost_send bad opcode = 0x%x\n",
3570 ib_wr->opcode);
3571 break;
3572 }
3573
3574 if (err)
3575 break;
3576 ib_wr = ib_wr->next;
3577 }
3578
3579 if (!iwqp->flush_issued) {
3580 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
3581 irdma_uk_qp_post_wr(ukqp);
3582 spin_unlock_irqrestore(&iwqp->lock, flags);
3583 } else {
3584 spin_unlock_irqrestore(&iwqp->lock, flags);
3585 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3586 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3587 }
3588 if (err)
3589 *bad_wr = ib_wr;
3590
3591 return err;
3592 }
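/*
 * Illustrative sketch (not part of the driver): a minimal, hedged example of
 * how a kernel ULP could hand a signaled SEND to irdma_post_send() through
 * the core ib_post_send() verb. The qp, pd, dma_addr and len names are
 * placeholders, not symbols from this file.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,		// DMA-mapped source buffer
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = 0xcafe,		// echoed back in ib_wc.wr_id
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */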
3593
3594 /**
3595 * irdma_post_recv - post receive wr for kernel application
3596 * @ibqp: ib qp pointer
3597 * @ib_wr: work request for receive
3598 * @bad_wr: bad wr caused an error
3599 */
3600 static int irdma_post_recv(struct ib_qp *ibqp,
3601 const struct ib_recv_wr *ib_wr,
3602 const struct ib_recv_wr **bad_wr)
3603 {
3604 struct irdma_qp *iwqp;
3605 struct irdma_qp_uk *ukqp;
3606 struct irdma_post_rq_info post_recv = {};
3607 unsigned long flags;
3608 int err = 0;
3609
3610 iwqp = to_iwqp(ibqp);
3611 ukqp = &iwqp->sc_qp.qp_uk;
3612
3613 spin_lock_irqsave(&iwqp->lock, flags);
3614 while (ib_wr) {
3615 post_recv.num_sges = ib_wr->num_sge;
3616 post_recv.wr_id = ib_wr->wr_id;
3617 post_recv.sg_list = ib_wr->sg_list;
3618 err = irdma_uk_post_receive(ukqp, &post_recv);
3619 if (err) {
3620 ibdev_dbg(&iwqp->iwdev->ibdev,
3621 "VERBS: post_recv err %d\n", err);
3622 goto out;
3623 }
3624
3625 ib_wr = ib_wr->next;
3626 }
3627
3628 out:
3629 spin_unlock_irqrestore(&iwqp->lock, flags);
3630 if (iwqp->flush_issued)
3631 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3632 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3633
3634 if (err)
3635 *bad_wr = ib_wr;
3636
3637 return err;
3638 }
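/*
 * Illustrative sketch (not part of the driver): posting a single receive
 * buffer that ends up in irdma_post_recv() via ib_post_recv(). Names other
 * than the core verbs API are placeholders.
 *
 *	struct ib_sge sge = {
 *		.addr   = rx_dma_addr,
 *		.length = rx_len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = 0xbeef,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	const struct ib_recv_wr *bad_wr;
 *	int ret = ib_post_recv(qp, &wr, &bad_wr);
 */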
3639
3640 /**
3641 * irdma_flush_err_to_ib_wc_status - convert flush error code to IB WC status
3642 * @opcode: iwarp flush code
3643 */
3644 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
3645 {
3646 switch (opcode) {
3647 case FLUSH_PROT_ERR:
3648 return IB_WC_LOC_PROT_ERR;
3649 case FLUSH_REM_ACCESS_ERR:
3650 return IB_WC_REM_ACCESS_ERR;
3651 case FLUSH_LOC_QP_OP_ERR:
3652 return IB_WC_LOC_QP_OP_ERR;
3653 case FLUSH_REM_OP_ERR:
3654 return IB_WC_REM_OP_ERR;
3655 case FLUSH_LOC_LEN_ERR:
3656 return IB_WC_LOC_LEN_ERR;
3657 case FLUSH_GENERAL_ERR:
3658 return IB_WC_WR_FLUSH_ERR;
3659 case FLUSH_RETRY_EXC_ERR:
3660 return IB_WC_RETRY_EXC_ERR;
3661 case FLUSH_MW_BIND_ERR:
3662 return IB_WC_MW_BIND_ERR;
3663 case FLUSH_REM_INV_REQ_ERR:
3664 return IB_WC_REM_INV_REQ_ERR;
3665 case FLUSH_FATAL_ERR:
3666 default:
3667 return IB_WC_FATAL_ERR;
3668 }
3669 }
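/*
 * Illustrative sketch (assumption, not driver code): once a QP moves to the
 * error state, outstanding WRs complete with one of the statuses mapped
 * above, typically IB_WC_WR_FLUSH_ERR. A consumer drain loop might look like:
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status == IB_WC_WR_FLUSH_ERR)
 *			continue;	// flushed, nothing was transferred
 *		...
 *	}
 */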
3670
3671 /**
3672 * irdma_process_cqe - process cqe info
3673 * @entry: processed cqe
3674 * @cq_poll_info: cqe info
3675 */
3676 static void irdma_process_cqe(struct ib_wc *entry,
3677 struct irdma_cq_poll_info *cq_poll_info)
3678 {
3679 struct irdma_sc_qp *qp;
3680
3681 entry->wc_flags = 0;
3682 entry->pkey_index = 0;
3683 entry->wr_id = cq_poll_info->wr_id;
3684
3685 qp = cq_poll_info->qp_handle;
3686 entry->qp = qp->qp_uk.back_qp;
3687
3688 if (cq_poll_info->error) {
3689 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
3690 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
3691
3692 entry->vendor_err = cq_poll_info->major_err << 16 |
3693 cq_poll_info->minor_err;
3694 } else {
3695 entry->status = IB_WC_SUCCESS;
3696 if (cq_poll_info->imm_valid) {
3697 entry->ex.imm_data = htonl(cq_poll_info->imm_data);
3698 entry->wc_flags |= IB_WC_WITH_IMM;
3699 }
3700 if (cq_poll_info->ud_smac_valid) {
3701 ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
3702 entry->wc_flags |= IB_WC_WITH_SMAC;
3703 }
3704
3705 if (cq_poll_info->ud_vlan_valid) {
3706 u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
3707
3708 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
3709 if (vlan) {
3710 entry->vlan_id = vlan;
3711 entry->wc_flags |= IB_WC_WITH_VLAN;
3712 }
3713 } else {
3714 entry->sl = 0;
3715 }
3716 }
3717
3718 if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
3719 set_ib_wc_op_sq(cq_poll_info, entry);
3720 } else {
3721 set_ib_wc_op_rq(cq_poll_info, entry,
3722 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
3723 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
3724 cq_poll_info->stag_invalid_set) {
3725 entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
3726 entry->wc_flags |= IB_WC_WITH_INVALIDATE;
3727 }
3728 }
3729
3730 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
3731 entry->src_qp = cq_poll_info->ud_src_qpn;
3732 entry->slid = 0;
3733 entry->wc_flags |=
3734 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
3735 entry->network_hdr_type = cq_poll_info->ipv4 ?
3736 RDMA_NETWORK_IPV4 :
3737 RDMA_NETWORK_IPV6;
3738 } else {
3739 entry->src_qp = cq_poll_info->qp_id;
3740 }
3741
3742 entry->byte_len = cq_poll_info->bytes_xfered;
3743 }
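/*
 * Illustrative sketch (not part of the driver): how a consumer reads back
 * the fields filled in by irdma_process_cqe(). ex.imm_data is stored in
 * network byte order (see the htonl() above), so the receiver converts it.
 *
 *	if (wc.wc_flags & IB_WC_WITH_IMM)
 *		imm = be32_to_cpu(wc.ex.imm_data);
 *	if (wc.wc_flags & IB_WC_WITH_INVALIDATE)
 *		rkey = wc.ex.invalidate_rkey;	// remotely invalidated STag
 */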
3744
3745 /**
3746 * irdma_poll_one - poll one entry of the CQ
3747 * @ukcq: ukcq to poll
3748 * @cur_cqe: current CQE info to be filled in
3749 * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
3750 *
3751 * Returns the internal irdma device error code or 0 on success
3752 */
3753 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
3754 struct irdma_cq_poll_info *cur_cqe,
3755 struct ib_wc *entry)
3756 {
3757 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3758
3759 if (ret)
3760 return ret;
3761
3762 irdma_process_cqe(entry, cur_cqe);
3763
3764 return 0;
3765 }
3766
3767 /**
3768 * __irdma_poll_cq - poll cq for completion (kernel apps)
3769 * @iwcq: cq to poll
3770 * @num_entries: number of entries to poll
3771 * @entry: wr of a completed entry
3772 */
3773 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3774 {
3775 struct list_head *tmp_node, *list_node;
3776 struct irdma_cq_buf *last_buf = NULL;
3777 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3778 struct irdma_cq_buf *cq_buf;
3779 int ret;
3780 struct irdma_device *iwdev;
3781 struct irdma_cq_uk *ukcq;
3782 bool cq_new_cqe = false;
3783 int resized_bufs = 0;
3784 int npolled = 0;
3785
3786 iwdev = to_iwdev(iwcq->ibcq.device);
3787 ukcq = &iwcq->sc_cq.cq_uk;
3788
3789 /* go through the list of previously resized CQ buffers */
3790 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3791 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3792 while (npolled < num_entries) {
3793 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3794 if (!ret) {
3795 ++npolled;
3796 cq_new_cqe = true;
3797 continue;
3798 }
3799 if (ret == -ENOENT)
3800 break;
3801 /* QP using the CQ is destroyed. Skip reporting this CQE */
3802 if (ret == -EFAULT) {
3803 cq_new_cqe = true;
3804 continue;
3805 }
3806 goto error;
3807 }
3808
3809 /* save the resized CQ buffer which received the last cqe */
3810 if (cq_new_cqe)
3811 last_buf = cq_buf;
3812 cq_new_cqe = false;
3813 }
3814
3815 /* check the current CQ for new cqes */
3816 while (npolled < num_entries) {
3817 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3818 if (ret == -ENOENT) {
3819 ret = irdma_generated_cmpls(iwcq, cur_cqe);
3820 if (!ret)
3821 irdma_process_cqe(entry + npolled, cur_cqe);
3822 }
3823 if (!ret) {
3824 ++npolled;
3825 cq_new_cqe = true;
3826 continue;
3827 }
3828
3829 if (ret == -ENOENT)
3830 break;
3831 /* QP using the CQ is destroyed. Skip reporting this CQE */
3832 if (ret == -EFAULT) {
3833 cq_new_cqe = true;
3834 continue;
3835 }
3836 goto error;
3837 }
3838
3839 if (cq_new_cqe)
3840 /* all previous CQ resizes are complete */
3841 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3842 else if (last_buf)
3843 /* only CQ resizes up to the last_buf are complete */
3844 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3845 if (resized_bufs)
3846 /* report to the HW the number of complete CQ resizes */
3847 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3848
3849 return npolled;
3850 error:
3851 ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
3852 __func__, ret);
3853
3854 return ret;
3855 }
3856
3857 /**
3858 * irdma_poll_cq - poll cq for completion (kernel apps)
3859 * @ibcq: cq to poll
3860 * @num_entries: number of entries to poll
3861 * @entry: wr of a completed entry
3862 */
3863 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3864 struct ib_wc *entry)
3865 {
3866 struct irdma_cq *iwcq;
3867 unsigned long flags;
3868 int ret;
3869
3870 iwcq = to_iwcq(ibcq);
3871
3872 spin_lock_irqsave(&iwcq->lock, flags);
3873 ret = __irdma_poll_cq(iwcq, num_entries, entry);
3874 spin_unlock_irqrestore(&iwcq->lock, flags);
3875
3876 return ret;
3877 }
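/*
 * Illustrative sketch (not part of the driver): the usual kernel polling
 * pattern that lands in irdma_poll_cq(). ib_poll_cq() returns the number of
 * completions written into the wc array, so callers loop until it returns 0.
 * handle_completion() is a placeholder helper.
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 *	}
 */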
3878
3879 /**
3880 * irdma_req_notify_cq - arm cq kernel application
3881 * @ibcq: cq to arm
3882 * @notify_flags: notification flags
3883 */
3884 static int irdma_req_notify_cq(struct ib_cq *ibcq,
3885 enum ib_cq_notify_flags notify_flags)
3886 {
3887 struct irdma_cq *iwcq;
3888 struct irdma_cq_uk *ukcq;
3889 unsigned long flags;
3890 enum irdma_cmpl_notify cq_notify;
3891 bool promo_event = false;
3892 int ret = 0;
3893
3894 cq_notify = notify_flags == IB_CQ_SOLICITED ?
3895 IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
3896 iwcq = to_iwcq(ibcq);
3897 ukcq = &iwcq->sc_cq.cq_uk;
3898
3899 spin_lock_irqsave(&iwcq->lock, flags);
3900 /* Only promote to arm the CQ for any event if the last arm event was solicited. */
3901 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
3902 promo_event = true;
3903
3904 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
3905 iwcq->last_notify = cq_notify;
3906 irdma_uk_cq_request_notification(ukcq, cq_notify);
3907 }
3908
3909 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3910 (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
3911 ret = 1;
3912 spin_unlock_irqrestore(&iwcq->lock, flags);
3913
3914 return ret;
3915 }
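/*
 * Illustrative sketch (not part of the driver): the standard arm-then-repoll
 * pattern served by irdma_req_notify_cq(). With IB_CQ_REPORT_MISSED_EVENTS
 * the verb returns a positive value when completions may have arrived between
 * the last poll and the arm, in which case the caller polls again instead of
 * waiting for the next interrupt. "repoll" is a placeholder label.
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto repoll;
 */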
3916
3917 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
3918 struct ib_port_immutable *immutable)
3919 {
3920 struct ib_port_attr attr;
3921 int err;
3922
3923 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3924 err = ib_query_port(ibdev, port_num, &attr);
3925 if (err)
3926 return err;
3927
3928 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3929 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3930 immutable->gid_tbl_len = attr.gid_tbl_len;
3931
3932 return 0;
3933 }
3934
3935 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
3936 struct ib_port_immutable *immutable)
3937 {
3938 struct ib_port_attr attr;
3939 int err;
3940
3941 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
3942 err = ib_query_port(ibdev, port_num, &attr);
3943 if (err)
3944 return err;
3945 immutable->gid_tbl_len = attr.gid_tbl_len;
3946
3947 return 0;
3948 }
3949
3950 static const struct rdma_stat_desc irdma_hw_stat_names[] = {
3951 /* gen1 - 32-bit */
3952 [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
3953 [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
3954 [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
3955 [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
3956 [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
3957 [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
3958 [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
3959 [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
3960 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
3961 [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
3962 /* gen1 - 64-bit */
3963 [IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets",
3964 [IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts",
3965 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd",
3966 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts",
3967 [IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets",
3968 [IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts",
3969 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd",
3970 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts",
3971 [IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets",
3972 [IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts",
3973 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd",
3974 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts",
3975 [IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets",
3976 [IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts",
3977 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd",
3978 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts",
3979 [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs",
3980 [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs",
3981 [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads",
3982 [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends",
3983 [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites",
3984 [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads",
3985 [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends",
3986 [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites",
3987 [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd",
3988 [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv",
3989
3990 /* gen2 - 32-bit */
3991 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
3992 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
3993 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
3994 /* gen2 - 64-bit */
3995 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets",
3996 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets",
3997 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets",
3998 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets",
3999 [IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP",
4000 [IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP",
4001 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd",
4002
4003 };
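/*
 * Note (an assumption about the standard rdma_hw_stats plumbing, not code
 * from this file): once irdma_alloc_hw_port_stats()/irdma_get_hw_stats() are
 * wired up below, the IB core exposes the counters named above via sysfs,
 * e.g. for a device registered as irdma0:
 *
 *	/sys/class/infiniband/irdma0/ports/1/hw_counters/ip4InOctets
 */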
4004
4005 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
4006 {
4007 struct irdma_device *iwdev = to_iwdev(dev);
4008
4009 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
4010 irdma_fw_major_ver(&iwdev->rf->sc_dev),
4011 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
4012 }
4013
4014 /**
4015 * irdma_alloc_hw_port_stats - Allocate a hw stats structure
4016 * @ibdev: device pointer from stack
4017 * @port_num: port number
4018 */
4019 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
4020 u32 port_num)
4021 {
4022 struct irdma_device *iwdev = to_iwdev(ibdev);
4023 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
4024
4025 int num_counters = dev->hw_attrs.max_stat_idx;
4026 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
4027
4028 return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
4029 lifespan);
4030 }
4031
4032 /**
4033 * irdma_get_hw_stats - Populates the rdma_hw_stats structure
4034 * @ibdev: device pointer from stack
4035 * @stats: stats pointer from stack
4036 * @port_num: port number
4037 * @index: which hw counter the stack is requesting we update
4038 */
4039 static int irdma_get_hw_stats(struct ib_device *ibdev,
4040 struct rdma_hw_stats *stats, u32 port_num,
4041 int index)
4042 {
4043 struct irdma_device *iwdev = to_iwdev(ibdev);
4044 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
4045
4046 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
4047 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
4048 else
4049 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
4050
4051 memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
4052
4053 return stats->num_counters;
4054 }
4055
4056 /**
4057 * irdma_query_gid - Query port GID
4058 * @ibdev: device pointer from stack
4059 * @port: port number
4060 * @index: Entry index
4061 * @gid: Global ID
4062 */
4063 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
4064 union ib_gid *gid)
4065 {
4066 struct irdma_device *iwdev = to_iwdev(ibdev);
4067
4068 memset(gid->raw, 0, sizeof(gid->raw));
4069 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
4070
4071 return 0;
4072 }
4073
4074 /**
4075 * mcast_list_add - Add a new mcast item to list
4076 * @rf: RDMA PCI function
4077 * @new_elem: pointer to element to add
4078 */
4079 static void mcast_list_add(struct irdma_pci_f *rf,
4080 struct mc_table_list *new_elem)
4081 {
4082 list_add(&new_elem->list, &rf->mc_qht_list.list);
4083 }
4084
4085 /**
4086 * mcast_list_del - Remove an mcast item from list
4087 * @mc_qht_elem: pointer to mcast table list element
4088 */
4089 static void mcast_list_del(struct mc_table_list *mc_qht_elem)
4090 {
4091 if (mc_qht_elem)
4092 list_del(&mc_qht_elem->list);
4093 }
4094
4095 /**
4096 * mcast_list_lookup_ip - Search mcast list for address
4097 * @rf: RDMA PCI function
4098 * @ip_mcast: pointer to mcast IP address
4099 */
4100 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
4101 u32 *ip_mcast)
4102 {
4103 struct mc_table_list *mc_qht_el;
4104 struct list_head *pos, *q;
4105
4106 list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
4107 mc_qht_el = list_entry(pos, struct mc_table_list, list);
4108 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
4109 sizeof(mc_qht_el->mc_info.dest_ip)))
4110 return mc_qht_el;
4111 }
4112
4113 return NULL;
4114 }
4115
4116 /**
4117 * irdma_mcast_cqp_op - perform a mcast cqp operation
4118 * @iwdev: irdma device
4119 * @mc_grp_ctx: mcast group info
4120 * @op: operation
4121 *
4122 * returns error status
4123 */
4124 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
4125 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
4126 {
4127 struct cqp_cmds_info *cqp_info;
4128 struct irdma_cqp_request *cqp_request;
4129 int status;
4130
4131 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
4132 if (!cqp_request)
4133 return -ENOMEM;
4134
4135 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
4136 cqp_info = &cqp_request->info;
4137 cqp_info->cqp_cmd = op;
4138 cqp_info->post_sq = 1;
4139 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
4140 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
4141 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
4142 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
4143
4144 return status;
4145 }
4146
4147 /**
4148 * irdma_mcast_mac - Get the multicast MAC for an IP address
4149 * @ip_addr: IPv4 or IPv6 address
4150 * @mac: pointer to result MAC address
4151 * @ipv4: flag indicating IPv4 or IPv6
4152 *
4153 */
4154 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
4155 {
4156 u8 *ip = (u8 *)ip_addr;
4157
4158 if (ipv4) {
4159 unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
4160 0x00, 0x00};
4161
4162 mac4[3] = ip[2] & 0x7F;
4163 mac4[4] = ip[1];
4164 mac4[5] = ip[0];
4165 ether_addr_copy(mac, mac4);
4166 } else {
4167 unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
4168 0x00, 0x00};
4169
4170 mac6[2] = ip[3];
4171 mac6[3] = ip[2];
4172 mac6[4] = ip[1];
4173 mac6[5] = ip[0];
4174 ether_addr_copy(mac, mac6);
4175 }
4176 }
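/*
 * Worked example (illustrative, assuming the little-endian byte layout the
 * driver runs on): for the IPv4 group 239.1.2.3, ip_addr[0] holds 0xef010203
 * in host order, so ip[2], ip[1], ip[0] are 0x01, 0x02, 0x03 and the
 * resulting MAC is 01:00:5e:01:02:03 - the RFC 1112 mapping of the low 23
 * bits of the group address onto the 01:00:5e OUI.
 */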
4177
4178 /**
4179 * irdma_attach_mcast - attach a qp to a multicast group
4180 * @ibqp: ptr to qp
4181 * @ibgid: pointer to global ID
4182 * @lid: local ID
4183 *
4184 * returns error status
4185 */
4186 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4187 {
4188 struct irdma_qp *iwqp = to_iwqp(ibqp);
4189 struct irdma_device *iwdev = iwqp->iwdev;
4190 struct irdma_pci_f *rf = iwdev->rf;
4191 struct mc_table_list *mc_qht_elem;
4192 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4193 unsigned long flags;
4194 u32 ip_addr[4] = {};
4195 u32 mgn;
4196 u32 no_mgs;
4197 int ret = 0;
4198 bool ipv4;
4199 u16 vlan_id;
4200 union irdma_sockaddr sgid_addr;
4201 unsigned char dmac[ETH_ALEN];
4202
4203 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4204
4205 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
4206 irdma_copy_ip_ntohl(ip_addr,
4207 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4208 irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL);
4209 ipv4 = false;
4210 ibdev_dbg(&iwdev->ibdev,
4211 "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
4212 ip_addr);
4213 irdma_mcast_mac(ip_addr, dmac, false);
4214 } else {
4215 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4216 ipv4 = true;
4217 vlan_id = irdma_get_vlan_ipv4(ip_addr);
4218 irdma_mcast_mac(ip_addr, dmac, true);
4219 ibdev_dbg(&iwdev->ibdev,
4220 "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
4221 ibqp->qp_num, ip_addr, dmac);
4222 }
4223
4224 spin_lock_irqsave(&rf->qh_list_lock, flags);
4225 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4226 if (!mc_qht_elem) {
4227 struct irdma_dma_mem *dma_mem_mc;
4228
4229 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4230 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
4231 if (!mc_qht_elem)
4232 return -ENOMEM;
4233
4234 mc_qht_elem->mc_info.ipv4_valid = ipv4;
4235 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
4236 sizeof(mc_qht_elem->mc_info.dest_ip));
4237 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
4238 &mgn, &rf->next_mcg);
4239 if (ret) {
4240 kfree(mc_qht_elem);
4241 return -ENOMEM;
4242 }
4243
4244 mc_qht_elem->mc_info.mgn = mgn;
4245 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
4246 dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
4247 IRDMA_HW_PAGE_SIZE);
4248 dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
4249 dma_mem_mc->size,
4250 &dma_mem_mc->pa,
4251 GFP_KERNEL);
4252 if (!dma_mem_mc->va) {
4253 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
4254 kfree(mc_qht_elem);
4255 return -ENOMEM;
4256 }
4257
4258 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
4259 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
4260 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
4261 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
4262 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
4263 if (vlan_id < VLAN_N_VID)
4264 mc_qht_elem->mc_grp_ctx.vlan_valid = true;
4265 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
4266 mc_qht_elem->mc_grp_ctx.qs_handle =
4267 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4268 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
4269
4270 spin_lock_irqsave(&rf->qh_list_lock, flags);
4271 mcast_list_add(rf, mc_qht_elem);
4272 } else {
4273 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
4274 IRDMA_MAX_MGS_PER_CTX) {
4275 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4276 return -ENOMEM;
4277 }
4278 }
4279
4280 mcg_info.qp_id = iwqp->ibqp.qp_num;
4281 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
4282 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4283 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4284
4285 /* Only if there is a change do we need to modify or create */
4286 if (!no_mgs) {
4287 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4288 IRDMA_OP_MC_CREATE);
4289 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4290 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4291 IRDMA_OP_MC_MODIFY);
4292 } else {
4293 return 0;
4294 }
4295
4296 if (ret)
4297 goto error;
4298
4299 return 0;
4300
4301 error:
4302 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4303 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4304 mcast_list_del(mc_qht_elem);
4305 dma_free_coherent(rf->hw.device,
4306 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4307 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4308 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4309 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4310 irdma_free_rsrc(rf, rf->allocated_mcgs,
4311 mc_qht_elem->mc_grp_ctx.mg_id);
4312 kfree(mc_qht_elem);
4313 }
4314
4315 return ret;
4316 }
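/*
 * Illustrative sketch (not part of the driver): a kernel consumer reaches
 * irdma_attach_mcast() through the core verb, passing the multicast GID of
 * the group (for RoCEv2 this encodes the multicast IP address). The mgid
 * value is a placeholder.
 *
 *	union ib_gid mgid = { ... };
 *	int ret = ib_attach_mcast(qp, &mgid, 0);
 */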
4317
4318 /**
4319 * irdma_detach_mcast - detach a qp from a multicast group
4320 * @ibqp: ptr to qp
4321 * @ibgid: pointer to global ID
4322 * @lid: local ID
4323 *
4324 * returns error status
4325 */
4326 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4327 {
4328 struct irdma_qp *iwqp = to_iwqp(ibqp);
4329 struct irdma_device *iwdev = iwqp->iwdev;
4330 struct irdma_pci_f *rf = iwdev->rf;
4331 u32 ip_addr[4] = {};
4332 struct mc_table_list *mc_qht_elem;
4333 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4334 int ret;
4335 unsigned long flags;
4336 union irdma_sockaddr sgid_addr;
4337
4338 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4339 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
4340 irdma_copy_ip_ntohl(ip_addr,
4341 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4342 else
4343 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4344
4345 spin_lock_irqsave(&rf->qh_list_lock, flags);
4346 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4347 if (!mc_qht_elem) {
4348 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4349 ibdev_dbg(&iwdev->ibdev,
4350 "VERBS: address not found MCG\n");
4351 return 0;
4352 }
4353
4354 mcg_info.qp_id = iwqp->ibqp.qp_num;
4355 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4356 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4357 mcast_list_del(mc_qht_elem);
4358 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4359 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4360 IRDMA_OP_MC_DESTROY);
4361 if (ret) {
4362 ibdev_dbg(&iwdev->ibdev,
4363 "VERBS: failed MC_DESTROY MCG\n");
4364 spin_lock_irqsave(&rf->qh_list_lock, flags);
4365 mcast_list_add(rf, mc_qht_elem);
4366 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4367 return -EAGAIN;
4368 }
4369
4370 dma_free_coherent(rf->hw.device,
4371 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4372 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4373 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4374 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4375 irdma_free_rsrc(rf, rf->allocated_mcgs,
4376 mc_qht_elem->mc_grp_ctx.mg_id);
4377 kfree(mc_qht_elem);
4378 } else {
4379 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4380 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4381 IRDMA_OP_MC_MODIFY);
4382 if (ret) {
4383 ibdev_dbg(&iwdev->ibdev,
4384 "VERBS: failed Modify MCG\n");
4385 return ret;
4386 }
4387 }
4388
4389 return 0;
4390 }
4391
4392 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
4393 {
4394 struct irdma_pci_f *rf = iwdev->rf;
4395 int err;
4396
4397 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
4398 &rf->next_ah);
4399 if (err)
4400 return err;
4401
4402 err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
4403 irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
4404
4405 if (err) {
4406 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
4407 goto err_ah_create;
4408 }
4409
4410 if (!sleep) {
4411 int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
4412
4413 do {
4414 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
4415 mdelay(1);
4416 } while (!ah->sc_ah.ah_info.ah_valid && --cnt);
4417
4418 if (!cnt) {
4419 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
4420 err = -ETIMEDOUT;
4421 goto err_ah_create;
4422 }
4423 }
4424 return 0;
4425
4426 err_ah_create:
4427 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
4428
4429 return err;
4430 }
4431
4432 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
4433 {
4434 struct irdma_pd *pd = to_iwpd(ibah->pd);
4435 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4436 struct rdma_ah_attr *ah_attr = attr->ah_attr;
4437 const struct ib_gid_attr *sgid_attr;
4438 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4439 struct irdma_pci_f *rf = iwdev->rf;
4440 struct irdma_sc_ah *sc_ah;
4441 struct irdma_ah_info *ah_info;
4442 union irdma_sockaddr sgid_addr, dgid_addr;
4443 int err;
4444 u8 dmac[ETH_ALEN];
4445
4446 ah->pd = pd;
4447 sc_ah = &ah->sc_ah;
4448 sc_ah->ah_info.vsi = &iwdev->vsi;
4449 irdma_sc_init_ah(&rf->sc_dev, sc_ah);
4450 ah->sgid_index = ah_attr->grh.sgid_index;
4451 sgid_attr = ah_attr->grh.sgid_attr;
4452 memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
4453 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
4454 rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
4455 ah->av.attrs = *ah_attr;
4456 ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
4457 ah_info = &sc_ah->ah_info;
4458 ah_info->pd_idx = pd->sc_pd.pd_id;
4459 if (ah_attr->ah_flags & IB_AH_GRH) {
4460 ah_info->flow_label = ah_attr->grh.flow_label;
4461 ah_info->hop_ttl = ah_attr->grh.hop_limit;
4462 ah_info->tc_tos = ah_attr->grh.traffic_class;
4463 }
4464
4465 ether_addr_copy(dmac, ah_attr->roce.dmac);
4466 if (ah->av.net_type == RDMA_NETWORK_IPV4) {
4467 ah_info->ipv4_valid = true;
4468 ah_info->dest_ip_addr[0] =
4469 ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
4470 ah_info->src_ip_addr[0] =
4471 ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4472 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
4473 ah_info->dest_ip_addr[0]);
4474 if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
4475 ah_info->do_lpbk = true;
4476 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
4477 }
4478 } else {
4479 irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
4480 dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4481 irdma_copy_ip_ntohl(ah_info->src_ip_addr,
4482 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4483 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
4484 ah_info->dest_ip_addr);
4485 if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
4486 ah_info->do_lpbk = true;
4487 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
4488 }
4489 }
4490
4491 err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
4492 ah_info->mac_addr);
4493 if (err)
4494 return err;
4495
4496 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
4497 ah_info->ipv4_valid, dmac);
4498
4499 if (ah_info->dst_arpindex == -1)
4500 return -EINVAL;
4501
4502 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
4503 ah_info->vlan_tag = 0;
4504
4505 if (ah_info->vlan_tag < VLAN_N_VID) {
4506 u8 prio = rt_tos2priority(ah_info->tc_tos);
4507
4508 prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
4509
4510 ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;
4511 ah_info->insert_vlan_tag = true;
4512 }
4513
4514 return 0;
4515 }
4516
4517 /**
4518 * irdma_ah_exists - Check for existing identical AH
4519 * @iwdev: irdma device
4520 * @new_ah: AH to check for
4521 *
4522 * returns true if AH is found, false if not found.
4523 */
4524 static bool irdma_ah_exists(struct irdma_device *iwdev,
4525 struct irdma_ah *new_ah)
4526 {
4527 struct irdma_ah *ah;
4528 u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4529 new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4530 new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4531 new_ah->sc_ah.ah_info.dest_ip_addr[3];
4532
4533 hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
4534 /* Set ah_valid and ah_id the same so memcmp can work */
4535 new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
4536 new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
4537 if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
4538 sizeof(ah->sc_ah.ah_info))) {
4539 refcount_inc(&ah->refcnt);
4540 new_ah->parent_ah = ah;
4541 return true;
4542 }
4543 }
4544
4545 return false;
4546 }
4547
4548 /**
4549 * irdma_destroy_ah - Destroy address handle
4550 * @ibah: pointer to address handle
4551 * @ah_flags: flags for sleepable
4552 */
4553 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
4554 {
4555 struct irdma_device *iwdev = to_iwdev(ibah->device);
4556 struct irdma_ah *ah = to_iwah(ibah);
4557
4558 if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
4559 mutex_lock(&iwdev->ah_tbl_lock);
4560 if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
4561 mutex_unlock(&iwdev->ah_tbl_lock);
4562 return 0;
4563 }
4564 hash_del(&ah->parent_ah->list);
4565 kfree(ah->parent_ah);
4566 mutex_unlock(&iwdev->ah_tbl_lock);
4567 }
4568
4569 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
4570 false, NULL, ah);
4571
4572 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
4573 ah->sc_ah.ah_info.ah_idx);
4574
4575 return 0;
4576 }
4577
4578 /**
4579 * irdma_create_user_ah - create user address handle
4580 * @ibah: address handle
4581 * @attr: address handle attributes
4582 * @udata: User data
4583 *
4584 * returns 0 on success, error otherwise
4585 */
4586 static int irdma_create_user_ah(struct ib_ah *ibah,
4587 struct rdma_ah_init_attr *attr,
4588 struct ib_udata *udata)
4589 {
4590 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
4591 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4592 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4593 struct irdma_create_ah_resp uresp;
4594 struct irdma_ah *parent_ah;
4595 int err;
4596
4597 if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
4598 return -EINVAL;
4599
4600 err = irdma_setup_ah(ibah, attr);
4601 if (err)
4602 return err;
4603 mutex_lock(&iwdev->ah_tbl_lock);
4604 if (!irdma_ah_exists(iwdev, ah)) {
4605 err = irdma_create_hw_ah(iwdev, ah, true);
4606 if (err) {
4607 mutex_unlock(&iwdev->ah_tbl_lock);
4608 return err;
4609 }
4610 /* Add new AH to list */
4611 parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
4612 if (parent_ah) {
4613 u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4614 parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4615 parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4616 parent_ah->sc_ah.ah_info.dest_ip_addr[3];
4617
4618 ah->parent_ah = parent_ah;
4619 hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
4620 refcount_set(&parent_ah->refcnt, 1);
4621 }
4622 }
4623 mutex_unlock(&iwdev->ah_tbl_lock);
4624
4625 uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
4626 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
4627 if (err)
4628 irdma_destroy_ah(ibah, attr->flags);
4629
4630 return err;
4631 }
4632
4633 /**
4634 * irdma_create_ah - create address handle
4635 * @ibah: address handle
4636 * @attr: address handle attributes
4637 * @udata: NULL
4638 *
4639 * returns 0 on success, error otherwise
4640 */
4641 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
4642 struct ib_udata *udata)
4643 {
4644 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4645 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4646 int err;
4647
4648 err = irdma_setup_ah(ibah, attr);
4649 if (err)
4650 return err;
4651 err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
4652
4653 return err;
4654 }
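/*
 * Illustrative sketch (not part of the driver): kernel consumers reach
 * irdma_create_ah()/irdma_setup_ah() through rdma_create_ah(). The dgid,
 * sgid_index and pd values are placeholders.
 *
 *	struct rdma_ah_attr ah_attr = { .type = RDMA_AH_ATTR_TYPE_ROCE };
 *	struct ib_ah *ah;
 *
 *	rdma_ah_set_grh(&ah_attr, &dgid, 0, sgid_index, 64, 0);
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */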
4655
4656 /**
4657 * irdma_query_ah - Query address handle
4658 * @ibah: pointer to address handle
4659 * @ah_attr: address handle attributes
4660 */
4661 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
4662 {
4663 struct irdma_ah *ah = to_iwah(ibah);
4664
4665 memset(ah_attr, 0, sizeof(*ah_attr));
4666 if (ah->av.attrs.ah_flags & IB_AH_GRH) {
4667 ah_attr->ah_flags = IB_AH_GRH;
4668 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
4669 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
4670 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
4671 ah_attr->grh.sgid_index = ah->sgid_index;
4672 memcpy(&ah_attr->grh.dgid, &ah->dgid,
4673 sizeof(ah_attr->grh.dgid));
4674 }
4675
4676 return 0;
4677 }
4678
4679 static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
4680 u32 port_num)
4681 {
4682 return IB_LINK_LAYER_ETHERNET;
4683 }
4684
4685 static const struct ib_device_ops irdma_roce_dev_ops = {
4686 .attach_mcast = irdma_attach_mcast,
4687 .create_ah = irdma_create_ah,
4688 .create_user_ah = irdma_create_user_ah,
4689 .destroy_ah = irdma_destroy_ah,
4690 .detach_mcast = irdma_detach_mcast,
4691 .get_link_layer = irdma_get_link_layer,
4692 .get_port_immutable = irdma_roce_port_immutable,
4693 .modify_qp = irdma_modify_qp_roce,
4694 .query_ah = irdma_query_ah,
4695 .query_pkey = irdma_query_pkey,
4696 };
4697
4698 static const struct ib_device_ops irdma_iw_dev_ops = {
4699 .get_port_immutable = irdma_iw_port_immutable,
4700 .iw_accept = irdma_accept,
4701 .iw_add_ref = irdma_qp_add_ref,
4702 .iw_connect = irdma_connect,
4703 .iw_create_listen = irdma_create_listen,
4704 .iw_destroy_listen = irdma_destroy_listen,
4705 .iw_get_qp = irdma_get_qp,
4706 .iw_reject = irdma_reject,
4707 .iw_rem_ref = irdma_qp_rem_ref,
4708 .modify_qp = irdma_modify_qp,
4709 .query_gid = irdma_query_gid,
4710 };
4711
4712 static const struct ib_device_ops irdma_dev_ops = {
4713 .owner = THIS_MODULE,
4714 .driver_id = RDMA_DRIVER_IRDMA,
4715 .uverbs_abi_ver = IRDMA_ABI_VER,
4716
4717 .alloc_hw_port_stats = irdma_alloc_hw_port_stats,
4718 .alloc_mr = irdma_alloc_mr,
4719 .alloc_mw = irdma_alloc_mw,
4720 .alloc_pd = irdma_alloc_pd,
4721 .alloc_ucontext = irdma_alloc_ucontext,
4722 .create_cq = irdma_create_cq,
4723 .create_qp = irdma_create_qp,
4724 .dealloc_driver = irdma_ib_dealloc_device,
4725 .dealloc_mw = irdma_dealloc_mw,
4726 .dealloc_pd = irdma_dealloc_pd,
4727 .dealloc_ucontext = irdma_dealloc_ucontext,
4728 .dereg_mr = irdma_dereg_mr,
4729 .destroy_cq = irdma_destroy_cq,
4730 .destroy_qp = irdma_destroy_qp,
4731 .disassociate_ucontext = irdma_disassociate_ucontext,
4732 .get_dev_fw_str = irdma_get_dev_fw_str,
4733 .get_dma_mr = irdma_get_dma_mr,
4734 .get_hw_stats = irdma_get_hw_stats,
4735 .map_mr_sg = irdma_map_mr_sg,
4736 .mmap = irdma_mmap,
4737 .mmap_free = irdma_mmap_free,
4738 .poll_cq = irdma_poll_cq,
4739 .post_recv = irdma_post_recv,
4740 .post_send = irdma_post_send,
4741 .query_device = irdma_query_device,
4742 .query_port = irdma_query_port,
4743 .query_qp = irdma_query_qp,
4744 .reg_user_mr = irdma_reg_user_mr,
4745 .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
4746 .rereg_user_mr = irdma_rereg_user_mr,
4747 .req_notify_cq = irdma_req_notify_cq,
4748 .resize_cq = irdma_resize_cq,
4749 INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
4750 INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
4751 INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
4752 INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
4753 INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
4754 INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
4755 };
4756
4757 /**
4758 * irdma_init_roce_device - initialization of roce rdma device
4759 * @iwdev: irdma device
4760 */
4761 static void irdma_init_roce_device(struct irdma_device *iwdev)
4762 {
4763 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
4764 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4765 iwdev->netdev->dev_addr);
4766 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
4767 }
4768
4769 /**
4770 * irdma_init_iw_device - initialization of iwarp rdma device
4771 * @iwdev: irdma device
4772 */
4773 static void irdma_init_iw_device(struct irdma_device *iwdev)
4774 {
4775 struct net_device *netdev = iwdev->netdev;
4776
4777 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
4778 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
4779 netdev->dev_addr);
4780 memcpy(iwdev->ibdev.iw_ifname, netdev->name,
4781 sizeof(iwdev->ibdev.iw_ifname));
4782 ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
4783 }
4784
4785 /**
4786 * irdma_init_rdma_device - initialization of rdma device
4787 * @iwdev: irdma device
4788 */
4789 static void irdma_init_rdma_device(struct irdma_device *iwdev)
4790 {
4791 struct pci_dev *pcidev = iwdev->rf->pcidev;
4792
4793 if (iwdev->roce_mode)
4794 irdma_init_roce_device(iwdev);
4795 else
4796 irdma_init_iw_device(iwdev);
4797
4798 iwdev->ibdev.phys_port_cnt = 1;
4799 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
4800 iwdev->ibdev.dev.parent = &pcidev->dev;
4801 ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
4802 }
4803
4804 /**
4805 * irdma_port_ibevent - indicate port event
4806 * @iwdev: irdma device
4807 */
4808 void irdma_port_ibevent(struct irdma_device *iwdev)
4809 {
4810 struct ib_event event;
4811
4812 event.device = &iwdev->ibdev;
4813 event.element.port_num = 1;
4814 event.event =
4815 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4816 ib_dispatch_event(&event);
4817 }
4818
4819 /**
4820 * irdma_ib_unregister_device - unregister rdma device from IB
4821 * core
4822 * @iwdev: irdma device
4823 */
4824 void irdma_ib_unregister_device(struct irdma_device *iwdev)
4825 {
4826 iwdev->iw_status = 0;
4827 irdma_port_ibevent(iwdev);
4828 ib_unregister_device(&iwdev->ibdev);
4829 }
4830
4831 /**
4832 * irdma_ib_register_device - register irdma device to IB core
4833 * @iwdev: irdma device
4834 */
4835 int irdma_ib_register_device(struct irdma_device *iwdev)
4836 {
4837 int ret;
4838
4839 irdma_init_rdma_device(iwdev);
4840
4841 ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
4842 if (ret)
4843 goto error;
4844 dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
4845 ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
4846 if (ret)
4847 goto error;
4848
4849 iwdev->iw_status = 1;
4850 irdma_port_ibevent(iwdev);
4851
4852 return 0;
4853
4854 error:
4855 if (ret)
4856 ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
4857
4858 return ret;
4859 }
4860
4861 /**
4862 * irdma_ib_dealloc_device
4863 * @ibdev: ib device
4864 *
4865 * callback from ibdev dealloc_driver to deallocate resources
4866 * under irdma device
4867 */
4868 void irdma_ib_dealloc_device(struct ib_device *ibdev)
4869 {
4870 struct irdma_device *iwdev = to_iwdev(ibdev);
4871
4872 irdma_rt_deinit_hw(iwdev);
4873 irdma_ctrl_deinit_hw(iwdev->rf);
4874 kfree(iwdev->rf);
4875 }
4876