/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "irdma_main.h"

/**
 * irdma_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int
irdma_query_device(struct ib_device *ibdev,
		   struct ib_device_attr *props,
		   struct ib_udata *udata)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_pci_f *rf = iwdev->rf;
	struct pci_dev *pcidev = iwdev->rf->pcidev;
	struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	addrconf_addr_eui48((u8 *)&props->sys_image_guid,
			    IF_LLADDR(iwdev->netdev));
	props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
			irdma_fw_minor_ver(&rf->sc_dev);
	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
				  IB_DEVICE_MEM_MGT_EXTENSIONS;
	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	props->vendor_id = pcidev->vendor;
	props->vendor_part_id = pcidev->device;
	props->hw_ver = pcidev->revision;
	props->page_size_cap = hw_attrs->page_size_cap;
	props->max_mr_size = hw_attrs->max_mr_size;
	props->max_qp = rf->max_qp - rf->used_qps;
	props->max_qp_wr = hw_attrs->max_qp_wr;
	set_max_sge(props, rf);
	props->max_cq = rf->max_cq - rf->used_cqs;
	props->max_cqe = rf->max_cqe - 1;
	props->max_mr = rf->max_mr - rf->used_mrs;
	props->max_mw = props->max_mr;
	props->max_pd = rf->max_pd - rf->used_pds;
	props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
	props->max_qp_rd_atom = hw_attrs->max_hw_ird;
	props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
	if (rdma_protocol_roce(ibdev, 1)) {
		props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
		props->max_pkeys = IRDMA_PKEY_TBL_SZ;
		props->max_ah = rf->max_ah;
		if (hw_attrs->uk_attrs.hw_rev == IRDMA_GEN_2) {
			props->max_mcast_grp = rf->max_mcg;
			props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
			props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
		}
	}
	props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
	if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;

	return 0;
}

static int
irdma_mmap_legacy(struct irdma_ucontext *ucontext,
		  struct vm_area_struct *vma)
{
	u64 pfn;

	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_private_data = ucontext;
	pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;

#if __FreeBSD_version >= 1400026
	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot), NULL);
#else
	return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot));
#endif
}

#if __FreeBSD_version >= 1400026
static void
irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);

	kfree(entry);
}

struct rdma_user_mmap_entry *
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
			     enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
{
	struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int ret;

	if (!entry)
		return NULL;

	entry->bar_offset = bar_offset;
	entry->mmap_flag = mmap_flag;

	ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
					  &entry->rdma_entry, PAGE_SIZE);
	if (ret) {
		kfree(entry);
		return NULL;
	}
	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

#else
static inline bool
find_key_in_mmap_tbl(struct irdma_ucontext *ucontext, u64 key)
{
	struct irdma_user_mmap_entry *entry;

	HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, key) {
		if (entry->pgoff_key == key)
			return true;
	}

	return false;
}

struct irdma_user_mmap_entry *
irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
			       enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
{
	struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	unsigned long flags;
	int retry_cnt = 0;

	if (!entry)
		return NULL;

	entry->bar_offset = bar_offset;
	entry->mmap_flag = mmap_flag;
	entry->ucontext = ucontext;
	do {
		get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key));

		/* The key is a page offset */
		entry->pgoff_key >>= PAGE_SHIFT;

		/* In the event of a collision in the hash table, retry a new key */
		spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
		if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) {
			HASH_ADD(ucontext->mmap_hash_tbl, &entry->hlist, entry->pgoff_key);
			spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
			goto hash_add_done;
		}
		spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
	} while (retry_cnt++ < 10);

	irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
		    "mmap table add failed: Cannot find a unique key\n");
	kfree(entry);
	return NULL;

hash_add_done:
	/* libc mmap uses a byte offset */
	*mmap_offset = entry->pgoff_key << PAGE_SHIFT;

	return entry;
}

static struct irdma_user_mmap_entry *
irdma_find_user_mmap_entry(struct irdma_ucontext *ucontext,
			   struct vm_area_struct *vma)
{
	struct irdma_user_mmap_entry *entry;
	unsigned long flags;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return NULL;

	spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
	HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, vma->vm_pgoff) {
		if (entry->pgoff_key == vma->vm_pgoff) {
			spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
			return entry;
		}
	}

	spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);

	return NULL;
}

void
irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry)
{
	struct irdma_ucontext *ucontext;
	unsigned long flags;

	if (!entry)
		return;

	ucontext = entry->ucontext;

	spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
	HASH_DEL(ucontext->mmap_hash_tbl, &entry->hlist);
	spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);

	kfree(entry);
}

#endif
/**
 * irdma_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int
irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
#if __FreeBSD_version >= 1400026
	struct rdma_user_mmap_entry *rdma_entry;
#endif
	struct irdma_user_mmap_entry *entry;
	struct irdma_ucontext *ucontext;
	u64 pfn;
	int ret;

	ucontext = to_ucontext(context);

	/* Legacy support for libi40iw with hard-coded mmap key */
	if (ucontext->legacy_mode)
		return irdma_mmap_legacy(ucontext, vma);

#if __FreeBSD_version >= 1400026
	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
			    "pgoff[0x%lx] does not have valid entry\n",
			    vma->vm_pgoff);
		return -EINVAL;
	}

	entry = to_irdma_mmap_entry(rdma_entry);
#else
	entry = irdma_find_user_mmap_entry(ucontext, vma);
	if (!entry) {
		irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
			    "pgoff[0x%lx] does not have valid entry\n",
			    vma->vm_pgoff);
		return -EINVAL;
	}
#endif
	irdma_debug(&ucontext->iwdev->rf->sc_dev,
		    IRDMA_DEBUG_VERBS, "bar_offset [0x%lx] mmap_flag [%d]\n",
		    entry->bar_offset, entry->mmap_flag);

	pfn = (entry->bar_offset +
	       pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;

	switch (entry->mmap_flag) {
	case IRDMA_MMAP_IO_NC:
#if __FreeBSD_version >= 1400026
		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
#else
		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot));
#endif
		break;
	case IRDMA_MMAP_IO_WC:
#if __FreeBSD_version >= 1400026
		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
#else
		ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot));
#endif
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
			    "bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
			    entry->bar_offset, entry->mmap_flag, ret);
#if __FreeBSD_version >= 1400026
	rdma_user_mmap_entry_put(rdma_entry);
#endif

	return ret;
}

/**
 * irdma_alloc_push_page - allocate a push page for qp
 * @iwqp: qp pointer
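 *
 * Reserves a device push page for the QP so WQEs can be written directly
 * through the write-combined BAR mapping (see irdma_setup_push_mmap_entries()).
 * Failure is not fatal here; the QP simply keeps using the regular doorbell
 * path.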
 */
static void
irdma_alloc_push_page(struct irdma_qp *iwqp)
{
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_qp *qp = &iwqp->sc_qp;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_push_page.info.push_idx = 0;
	cqp_info->in.u.manage_push_page.info.qs_handle =
	    qp->vsi->qos[qp->user_pri].qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.info.push_page_type = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
	if (!status && cqp_request->compl_info.op_ret_val <
	    iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
		qp->push_idx = cqp_request->compl_info.op_ret_val;
		qp->push_offset = 0;
	}

	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
}

/**
 * irdma_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 */
struct irdma_pbl *
irdma_get_pbl(unsigned long va,
	      struct list_head *pbl_list)
{
	struct irdma_pbl *iwpbl;

	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			list_del(&iwpbl->list);
			iwpbl->on_list = false;
			return iwpbl;
		}
	}

	return NULL;
}

/**
 * irdma_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
void
irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
{
	struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
	unsigned long flags;

	spin_lock_irqsave(&iwcq->lock, flags);
	irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
	spin_unlock_irqrestore(&iwcq->lock, flags);
}

static u64
irdma_compute_push_wqe_offset(struct irdma_device *iwdev, u32 page_idx)
{
	u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];

	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
		/* skip over db page */
		bar_off += IRDMA_HW_PAGE_SIZE;
		/* skip over reserved space */
		bar_off += IRDMA_PF_BAR_RSVD;
	}

	/* push wqe page */
	bar_off += (u64)page_idx * IRDMA_HW_PAGE_SIZE;

	return bar_off;
}

void
irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
{
	if (iwqp->push_db_mmap_entry) {
#if __FreeBSD_version >= 1400026
		rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
#else
		irdma_user_mmap_entry_del_hash(iwqp->push_db_mmap_entry);
#endif
		iwqp->push_db_mmap_entry = NULL;
	}
	if (iwqp->push_wqe_mmap_entry) {
#if __FreeBSD_version >= 1400026
		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
#else
		irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
#endif
		iwqp->push_wqe_mmap_entry = NULL;
	}
}

static int
irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
			      struct irdma_qp *iwqp,
			      u64 *push_wqe_mmap_key,
			      u64 *push_db_mmap_key)
{
	struct irdma_device *iwdev = ucontext->iwdev;
	u64 bar_off;

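	/*
	 * Push WQE/doorbell pages are only exposed on GEN_2 and later HW;
	 * the BAR offsets computed below assume that layout.
	 */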
	WARN_ON_ONCE(iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_2);

	bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx);

#if __FreeBSD_version >= 1400026
	iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
								 bar_off, IRDMA_MMAP_IO_WC,
								 push_wqe_mmap_key);
#else
	iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
								   IRDMA_MMAP_IO_WC,
								   push_wqe_mmap_key);
#endif
	if (!iwqp->push_wqe_mmap_entry)
		return -ENOMEM;

	/* push doorbell page */
	bar_off += IRDMA_HW_PAGE_SIZE;
#if __FreeBSD_version >= 1400026
	iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
								bar_off, IRDMA_MMAP_IO_NC,
								push_db_mmap_key);
#else
	iwqp->push_db_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
								  IRDMA_MMAP_IO_NC,
								  push_db_mmap_key);
#endif
	if (!iwqp->push_db_mmap_entry) {
#if __FreeBSD_version >= 1400026
		rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
#else
		irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
#endif
		return -ENOMEM;
	}

	return 0;
}

/**
 * irdma_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: irdma device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
void
irdma_setup_virt_qp(struct irdma_device *iwdev,
		    struct irdma_qp *iwqp,
		    struct irdma_qp_init_info *init_info)
{
	struct irdma_pbl *iwpbl = iwqp->iwpbl;
	struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = qpmr->shadow;
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		init_info->rq_pa = qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
}

/**
 * irdma_setup_umode_qp - setup sq and rq size in user mode qp
 * @udata: user data
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 * @init_attr: Initial QP create attributes
 */
int
irdma_setup_umode_qp(struct ib_udata *udata,
		     struct irdma_device *iwdev,
		     struct irdma_qp *iwqp,
		     struct irdma_qp_init_info *info,
		     struct ib_qp_init_attr *init_attr)
{
#if __FreeBSD_version >= 1400026
	struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
	struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
#endif
	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
	struct irdma_create_qp_req req = {0};
	unsigned long flags;
	int ret;

	ret = ib_copy_from_udata(&req, udata,
				 min(sizeof(req), udata->inlen));
	if (ret) {
		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
			    "ib_copy_from_udata fail\n");
		return ret;
	}

	iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
	iwqp->user_mode = 1;
	if (req.user_wqe_bufs) {
		info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
					    &ucontext->qp_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);

		if (!iwqp->iwpbl) {
			ret = -ENODATA;
			irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
				    "no pbl info\n");
			return ret;
		}
	}

	if (!ucontext->use_raw_attrs) {
		/**
		 * Maintain backward compat with older ABI which passes sq and
		 * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
		 * There is no way to compute the correct value of
		 * iwqp->max_send_wr/max_recv_wr in the kernel.
		 */
		iwqp->max_send_wr = init_attr->cap.max_send_wr;
		iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
		ukinfo->sq_size = init_attr->cap.max_send_wr;
		ukinfo->rq_size = init_attr->cap.max_recv_wr;
		irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift, &ukinfo->rq_shift);
	} else {
		ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
						   &ukinfo->sq_shift);
		if (ret)
			return ret;

		ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
						   &ukinfo->rq_shift);
		if (ret)
			return ret;

		iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
		iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
		ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
		ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
	}
	irdma_setup_virt_qp(iwdev, iwqp, info);

	return 0;
}

/**
 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 * @init_attr: Initial QP create attributes
 */
int
irdma_setup_kmode_qp(struct irdma_device *iwdev,
		     struct irdma_qp *iwqp,
		     struct irdma_qp_init_info *info,
		     struct ib_qp_init_attr *init_attr)
{
	struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 size;
	int status;
	struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
					      &ukinfo->sq_shift);
	if (status)
		return status;

	status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
					      &ukinfo->rq_shift);
	if (status)
		return status;

	iwqp->kqp.sq_wrid_mem =
	    kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
	if (!iwqp->kqp.sq_wrid_mem)
		return -ENOMEM;

	iwqp->kqp.rq_wrid_mem =
	    kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
	if (!iwqp->kqp.rq_wrid_mem) {
		kfree(iwqp->kqp.sq_wrid_mem);
		iwqp->kqp.sq_wrid_mem = NULL;
		return -ENOMEM;
	}

	/* kcalloc() already zeroes the signal tracking array. */
	iwqp->kqp.sig_trk_mem = kcalloc(ukinfo->sq_depth, sizeof(u32), GFP_KERNEL);
	if (!iwqp->kqp.sig_trk_mem) {
		kfree(iwqp->kqp.sq_wrid_mem);
		iwqp->kqp.sq_wrid_mem = NULL;
		kfree(iwqp->kqp.rq_wrid_mem);
		iwqp->kqp.rq_wrid_mem = NULL;
		return -ENOMEM;
	}
	ukinfo->sq_sigwrtrk_array = (void *)iwqp->kqp.sig_trk_mem;
	ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
	ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;

	size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
	size += (IRDMA_SHADOW_AREA_SIZE << 3);

	mem->size = size;
	mem->va = irdma_allocate_dma_mem(&iwdev->rf->hw, mem, mem->size,
					 256);
	if (!mem->va) {
		kfree(iwqp->kqp.sq_wrid_mem);
		iwqp->kqp.sq_wrid_mem = NULL;
		kfree(iwqp->kqp.rq_wrid_mem);
		iwqp->kqp.rq_wrid_mem = NULL;
		kfree(iwqp->kqp.sig_trk_mem);
		iwqp->kqp.sig_trk_mem = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;
	ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
	info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
	ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
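	/* The shadow area sits in the same DMA allocation, right after the RQ ring. */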
	info->shadow_area_pa = info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
	ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
	ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
	ukinfo->qp_id = iwqp->ibqp.qp_num;

	iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
	iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
	init_attr->cap.max_send_wr = iwqp->max_send_wr;
	init_attr->cap.max_recv_wr = iwqp->max_recv_wr;

	return 0;
}

int
irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
{
	struct irdma_pci_f *rf = iwqp->iwdev->rf;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_create_qp_info *qp_info;
	int status;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;
	memset(qp_info, 0, sizeof(*qp_info));
	qp_info->mac_valid = true;
	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;

	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	status = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);

	return status;
}

void
irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
				   struct irdma_qp_host_ctx_info *ctx_info)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_roce_offload_info *roce_info;
	struct irdma_udp_offload_info *udp_info;

	udp_info = &iwqp->udp_info;
	udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
	udp_info->cwnd = iwdev->roce_cwnd;
	udp_info->rexmit_thresh = 2;
	udp_info->rnr_nak_thresh = 2;
	udp_info->src_port = 0xc000;
	udp_info->dst_port = ROCE_V2_UDP_DPORT;
	roce_info = &iwqp->roce_info;
	ether_addr_copy(roce_info->mac_addr, IF_LLADDR(iwdev->netdev));

	roce_info->rd_en = true;
	roce_info->wr_rdresp_en = true;
	roce_info->bind_en = true;
	roce_info->dcqcn_en = false;
	roce_info->rtomin = iwdev->roce_rtomin;

	roce_info->ack_credits = iwdev->roce_ackcreds;
	roce_info->ird_size = dev->hw_attrs.max_hw_ird;
	roce_info->ord_size = dev->hw_attrs.max_hw_ord;

	if (!iwqp->user_mode) {
		roce_info->priv_mode_en = true;
		roce_info->fast_reg_en = true;
		roce_info->udprivcq_en = true;
	}
	roce_info->roce_tver = 0;

	ctx_info->roce_info = &iwqp->roce_info;
	ctx_info->udp_info = &iwqp->udp_info;
	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
}

void
irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
				 struct irdma_qp_host_ctx_info *ctx_info)
{
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_iwarp_offload_info *iwarp_info;

	iwarp_info = &iwqp->iwarp_info;
	ether_addr_copy(iwarp_info->mac_addr, IF_LLADDR(iwdev->netdev));
	iwarp_info->rd_en = true;
	iwarp_info->wr_rdresp_en = true;
	iwarp_info->bind_en = true;
	iwarp_info->ecn_en = true;
	iwarp_info->rtomin = 5;

	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
		iwarp_info->ib_rd_en = true;
	if (!iwqp->user_mode) {
		iwarp_info->priv_mode_en = true;
		iwarp_info->fast_reg_en = true;
	}
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info = &iwqp->iwarp_info;
	ctx_info->iwarp_info_valid = true;
	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	ctx_info->iwarp_info_valid = false;
}

int
irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
			struct irdma_device *iwdev)
{
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;

	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
	    init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
	    init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
		return -EINVAL;

	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
		if (init_attr->qp_type != IB_QPT_RC &&
		    init_attr->qp_type != IB_QPT_UD &&
		    init_attr->qp_type != IB_QPT_GSI)
			return -EOPNOTSUPP;
	} else {
		if (init_attr->qp_type != IB_QPT_RC)
			return -EOPNOTSUPP;
	}

	return 0;
}

void
irdma_sched_qp_flush_work(struct irdma_qp *iwqp)
{
	if (iwqp->sc_qp.qp_uk.destroy_pending)
		return;
	irdma_qp_add_ref(&iwqp->ibqp);
	if (mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
			     msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)))
		irdma_qp_rem_ref(&iwqp->ibqp);
}

void
irdma_flush_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);

	irdma_generate_flush_completions(iwqp);
	/* For the add in irdma_sched_qp_flush_work */
	irdma_qp_rem_ref(&iwqp->ibqp);
}

static int
irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
{
	int acc_flags = 0;

	if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
		if (iwqp->roce_info.wr_rdresp_en) {
			acc_flags |= IB_ACCESS_LOCAL_WRITE;
			acc_flags |= IB_ACCESS_REMOTE_WRITE;
		}
		if (iwqp->roce_info.rd_en)
			acc_flags |= IB_ACCESS_REMOTE_READ;
		if (iwqp->roce_info.bind_en)
			acc_flags |= IB_ACCESS_MW_BIND;
	} else {
		if (iwqp->iwarp_info.wr_rdresp_en) {
			acc_flags |= IB_ACCESS_LOCAL_WRITE;
			acc_flags |= IB_ACCESS_REMOTE_WRITE;
		}
		if (iwqp->iwarp_info.rd_en)
			acc_flags |= IB_ACCESS_REMOTE_READ;
		if (iwqp->iwarp_info.bind_en)
			acc_flags |= IB_ACCESS_MW_BIND;
	}
	return acc_flags;
}

/**
 * irdma_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int
irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	       int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_sc_qp *qp = &iwqp->sc_qp;

	memset(attr, 0, sizeof(*attr));
	memset(init_attr, 0, sizeof(*init_attr));

	attr->qp_state = iwqp->ibqp_state;
	attr->cur_qp_state = iwqp->ibqp_state;
	attr->cap.max_send_wr = iwqp->max_send_wr;
	attr->cap.max_recv_wr = iwqp->max_recv_wr;
	attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
	attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
	attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
	attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
	attr->port_num = 1;
	if (rdma_protocol_roce(ibqp->device, 1)) {
		attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
		attr->qkey = iwqp->roce_info.qkey;
		attr->rq_psn = iwqp->udp_info.epsn;
		attr->sq_psn = iwqp->udp_info.psn_nxt;
		attr->dest_qp_num = iwqp->roce_info.dest_qp;
		attr->pkey_index = iwqp->roce_info.p_key;
		attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
		attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
		attr->max_rd_atomic = iwqp->roce_info.ord_size;
		attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
	}

	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->cap = attr->cap;

	return 0;
}

/**
 * irdma_modify_qp_roce - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int
irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
	struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_qp_host_ctx_info *ctx_info;
	struct irdma_roce_offload_info *roce_info;
	struct irdma_udp_offload_info *udp_info;
	struct irdma_modify_qp_info info = {0};
	struct irdma_modify_qp_resp uresp = {};
	struct irdma_modify_qp_req ureq;
	unsigned long flags;
	u8 issue_modify_qp = 0;
	int ret = 0;

	ctx_info = &iwqp->ctx_info;
	roce_info = &iwqp->roce_info;
	udp_info = &iwqp->udp_info;

	if (udata) {
		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
			return -EINVAL;
	}

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	if (attr_mask & IB_QP_DEST_QPN)
		roce_info->dest_qp = attr->dest_qp_num;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
				       &roce_info->p_key);
		if (ret)
			return ret;
	}

	if (attr_mask & IB_QP_QKEY)
		roce_info->qkey = attr->qkey;

	if (attr_mask & IB_QP_PATH_MTU)
		udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);

	if (attr_mask & IB_QP_SQ_PSN) {
		udp_info->psn_nxt = attr->sq_psn;
		udp_info->lsn = 0xffff;
		udp_info->psn_una = attr->sq_psn;
		udp_info->psn_max = attr->sq_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		udp_info->epsn = attr->rq_psn;

	if (attr_mask & IB_QP_RNR_RETRY)
		udp_info->rnr_nak_thresh = attr->rnr_retry;

	if (attr_mask & IB_QP_RETRY_CNT)
		udp_info->rexmit_thresh = attr->retry_cnt;

	ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;

	if (attr_mask & IB_QP_AV) {
		struct irdma_av *av = &iwqp->roce_ah.av;
		u16 vlan_id = VLAN_N_VID;
		u32 local_ip[4] = {};

		memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			udp_info->ttl = attr->ah_attr.grh.hop_limit;
			udp_info->flow_label = attr->ah_attr.grh.flow_label;
			udp_info->tos = attr->ah_attr.grh.traffic_class;

			udp_info->src_port = kc_rdma_get_udp_sport(udp_info->flow_label,
								   ibqp->qp_num,
								   roce_info->dest_qp);

			irdma_qp_rem_qos(&iwqp->sc_qp);
			dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
			if (iwqp->sc_qp.vsi->dscp_mode)
				ctx_info->user_pri =
				    iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
			else
				ctx_info->user_pri = rt_tos2priority(udp_info->tos);
		}
		ret = kc_irdma_set_roce_cm_info(iwqp, attr, &vlan_id);
		if (ret)
			return ret;
		if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
			return -ENOMEM;
		iwqp->sc_qp.user_pri = ctx_info->user_pri;
		irdma_qp_add_qos(&iwqp->sc_qp);

		if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
			vlan_id = 0;
		if (vlan_id < VLAN_N_VID) {
			udp_info->insert_vlan_tag = true;
			udp_info->vlan_tag = vlan_id |
			    ctx_info->user_pri << VLAN_PRIO_SHIFT;
		} else {
			udp_info->insert_vlan_tag = false;
		}

		av->attrs = attr->ah_attr;
		rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
		if (av->net_type == RDMA_NETWORK_IPV6) {
			__be32 *daddr =
			    av->dgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;
			__be32 *saddr =
			    av->sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32;

			irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
			irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);

			udp_info->ipv4 = false;
			irdma_copy_ip_ntohl(local_ip, daddr);
		} else if (av->net_type == RDMA_NETWORK_IPV4) {
			__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
			__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;

			local_ip[0] = ntohl(daddr);

			udp_info->ipv4 = true;
			udp_info->dest_ip_addr[0] = 0;
			udp_info->dest_ip_addr[1] = 0;
			udp_info->dest_ip_addr[2] = 0;
			udp_info->dest_ip_addr[3] = local_ip[0];

			udp_info->local_ipaddr[0] = 0;
			udp_info->local_ipaddr[1] = 0;
			udp_info->local_ipaddr[2] = 0;
			udp_info->local_ipaddr[3] = ntohl(saddr);
		} else {
			return -EINVAL;
		}
		udp_info->arp_idx =
		    irdma_add_arp(iwdev->rf, local_ip,
				  ah_attr_to_dmac(attr->ah_attr));
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
			irdma_dev_err(&iwdev->ibdev,
				      "rd_atomic = %d, above max_hw_ord=%d\n",
				      attr->max_rd_atomic,
				      dev->hw_attrs.max_hw_ord);
			return -EINVAL;
		}
		if (attr->max_rd_atomic)
			roce_info->ord_size = attr->max_rd_atomic;
		info.ord_valid = true;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
			irdma_dev_err(&iwdev->ibdev,
				      "rd_atomic = %d, above max_hw_ird=%d\n",
				      attr->max_dest_rd_atomic,
				      dev->hw_attrs.max_hw_ird);
			return -EINVAL;
		}
		if (attr->max_dest_rd_atomic)
			roce_info->ird_size = attr->max_dest_rd_atomic;
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			roce_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			roce_info->rd_en = true;
	}

	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));

	irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
		    "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
		    __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
		    iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);

	spin_lock_irqsave(&iwqp->lock, flags);
	if (attr_mask & IB_QP_STATE) {
		if (!kc_ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
					   iwqp->ibqp.qp_type, attr_mask,
					   IB_LINK_LAYER_ETHERNET)) {
			irdma_dev_warn(&iwdev->ibdev,
				       "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
				       iwqp->ibqp.qp_num, iwqp->ibqp_state,
				       attr->qp_state);
			ret = -EINVAL;
			goto exit;
		}
		info.curr_iwarp_state = iwqp->iwarp_state;

		switch (attr->qp_state) {
		case IB_QPS_INIT:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
				ret = -EINVAL;
				goto exit;
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
				ret = -EINVAL;
				goto exit;
			}
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			info.next_iwarp_state = IRDMA_QP_STATE_RTR;
			issue_modify_qp = 1;
			break;
		case IB_QPS_RTS:
			if (iwqp->ibqp_state < IB_QPS_RTR ||
			    iwqp->ibqp_state == IB_QPS_ERR) {
				ret = -EINVAL;
				goto exit;
			}

			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			info.ord_valid = true;
			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
			issue_modify_qp = 1;
			if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
				iwdev->rf->check_fc(&iwdev->vsi, &iwqp->sc_qp);
			udp_info->cwnd = iwdev->roce_cwnd;
			roce_info->ack_credits = iwdev->roce_ackcreds;
			if (iwdev->push_mode && udata &&
			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_alloc_push_page(iwqp);
				spin_lock_irqsave(&iwqp->lock, flags);
			}
			break;
		case IB_QPS_SQD:
			if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
				goto exit;

			if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
				ret = -EINVAL;
				goto exit;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_SQD;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
				if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
					irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND);
				spin_unlock_irqrestore(&iwqp->lock, flags);
				info.next_iwarp_state = IRDMA_QP_STATE_SQD;
				irdma_hw_modify_qp(iwdev, iwqp, &info, true);
				spin_lock_irqsave(&iwqp->lock, flags);
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				if (udata && udata->inlen) {
					if (ib_copy_from_udata(&ureq, udata,
							       min(sizeof(ureq), udata->inlen)))
						return -EINVAL;

					irdma_flush_wqes(iwqp,
							 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
							 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
							 IRDMA_REFLUSH);
				}
				return 0;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
			issue_modify_qp = 1;
			break;
		default:
			ret = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;
	}

	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (issue_modify_qp) {
			ctx_info->rem_endpoint_idx = udp_info->arp_idx;
			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
				return -EINVAL;
			spin_lock_irqsave(&iwqp->lock, flags);
			if (iwqp->iwarp_state == info.curr_iwarp_state) {
				iwqp->iwarp_state = info.next_iwarp_state;
				iwqp->ibqp_state = attr->qp_state;
			}
			if (iwqp->ibqp_state > IB_QPS_RTS &&
			    !iwqp->flush_issued) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
						 IRDMA_FLUSH_RQ |
						 IRDMA_FLUSH_WAIT);
				iwqp->flush_issued = 1;

			} else {
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
		} else {
			iwqp->ibqp_state = attr->qp_state;
		}
		if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
			struct irdma_ucontext *ucontext;

#if __FreeBSD_version >= 1400026
			ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
			ucontext = to_ucontext(ibqp->uobject->context);
#endif
			if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
			    !iwqp->push_wqe_mmap_entry &&
			    !irdma_setup_push_mmap_entries(ucontext, iwqp,
							   &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
				uresp.push_valid = 1;
				uresp.push_offset = iwqp->sc_qp.push_offset;
			}
			uresp.rd_fence_rate = iwdev->rd_fence_rate;
			ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
								  udata->outlen));
			if (ret) {
				irdma_remove_push_mmap_entries(iwqp);
				irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
					    "copy_to_udata failed\n");
				return ret;
			}
		}
	}

	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return ret;
}

/**
 * irdma_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int
irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		struct ib_udata *udata)
{
#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
	struct irdma_qp *iwqp = to_iwqp(ibqp);
	struct irdma_device *iwdev = iwqp->iwdev;
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	struct irdma_qp_host_ctx_info *ctx_info;
	struct irdma_tcp_offload_info *tcp_info;
	struct irdma_iwarp_offload_info *offload_info;
	struct irdma_modify_qp_info info = {0};
	struct irdma_modify_qp_resp uresp = {};
	struct irdma_modify_qp_req ureq = {};
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	int err;
	unsigned long flags;

	if (udata) {
		if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
		    (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
			return -EINVAL;
	}

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	ctx_info = &iwqp->ctx_info;
	offload_info = &iwqp->iwarp_info;
	tcp_info = &iwqp->tcp_info;
	wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
	irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
		    "caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
		    __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
		    iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
		    iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);

	spin_lock_irqsave(&iwqp->lock, flags);
	if (attr_mask & IB_QP_STATE) {
		info.curr_iwarp_state = iwqp->iwarp_state;
		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
				info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			if (iwdev->push_mode && udata &&
			    iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
			    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_alloc_push_page(iwqp);
				spin_lock_irqsave(&iwqp->lock, flags);
			}
			break;
		case IB_QPS_RTS:
			if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
			    !iwqp->cm_id) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = IRDMA_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}

			if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
			    iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}

			if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}

			info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				if (udata && udata->inlen) {
					if (ib_copy_from_udata(&ureq, udata,
							       min(sizeof(ureq), udata->inlen)))
						return -EINVAL;

					irdma_flush_wqes(iwqp,
							 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
							 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
							 IRDMA_REFLUSH);
				}
				return 0;
			}

			if (iwqp->sc_qp.term_flags) {
				spin_unlock_irqrestore(&iwqp->lock, flags);
				irdma_terminate_del_timer(&iwqp->sc_qp);
				spin_lock_irqsave(&iwqp->lock, flags);
			}
			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
			if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
			    iwdev->iw_status &&
			    iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
				info.reset_tcp_conn = true;
			else
				dont_wait = 1;

			issue_modify_qp = 1;
			info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
			break;
		default:
			err = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		ctx_info->iwarp_info_valid = true;
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			offload_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			offload_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			offload_info->rd_en = true;
	}

	if (ctx_info->iwarp_info_valid) {
		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	}
	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (issue_modify_qp) {
			ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
			if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
				return -EINVAL;
		}

		spin_lock_irqsave(&iwqp->lock, flags);
		if (iwqp->iwarp_state == info.curr_iwarp_state) {
			iwqp->iwarp_state = info.next_iwarp_state;
			iwqp->ibqp_state = attr->qp_state;
		}
		spin_unlock_irqrestore(&iwqp->lock, flags);
	}

	if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
		if (dont_wait) {
			if (iwqp->hw_tcp_state) {
				spin_lock_irqsave(&iwqp->lock, flags);
				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
				iwqp->last_aeq = IRDMA_AE_RESET_SENT;
				spin_unlock_irqrestore(&iwqp->lock, flags);
			}
			irdma_cm_disconn(iwqp);
		} else {
			int close_timer_started;

			spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);

			if (iwqp->cm_node) {
				atomic_inc(&iwqp->cm_node->refcnt);
				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
				close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
				if (iwqp->cm_id && close_timer_started == 1)
					irdma_schedule_cm_timer(iwqp->cm_node,
								(struct irdma_puda_buf *)iwqp,
								IRDMA_TIMER_TYPE_CLOSE, 1, 0);

				irdma_rem_ref_cm_node(iwqp->cm_node);
			} else {
				spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
			}
		}
	}
	if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
		struct irdma_ucontext *ucontext;

#if __FreeBSD_version >= 1400026
		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
		ucontext = to_ucontext(ibqp->uobject->context);
#endif
		if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
		    !iwqp->push_wqe_mmap_entry &&
		    !irdma_setup_push_mmap_entries(ucontext, iwqp,
						   &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
			uresp.push_valid = 1;
			uresp.push_offset = iwqp->sc_qp.push_offset;
		}
		uresp.rd_fence_rate = iwdev->rd_fence_rate;

		err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
							  udata->outlen));
		if (err) {
			irdma_remove_push_mmap_entries(iwqp);
			irdma_debug(&iwdev->rf->sc_dev,
				    IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
			return err;
		}
	}

	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return err;
}

/**
 * irdma_cq_free_rsrc - free up resources for cq
 * @rf: RDMA PCI function
 * @iwcq: cq ptr
 */
void
irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
{
	struct irdma_sc_cq *cq = &iwcq->sc_cq;

	if (!iwcq->user_mode) {
		irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem);
		irdma_free_dma_mem(rf->sc_dev.hw, &iwcq->kmem_shadow);
	}

	irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
}

/**
 * irdma_free_cqbuf - worker to free a cq buffer
 * @work: provides access to the cq buffer to free
 */
static void
irdma_free_cqbuf(struct work_struct *work)
{
	struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);

	irdma_free_dma_mem(cq_buf->hw, &cq_buf->kmem_buf);
	kfree(cq_buf);
}

/**
 * irdma_process_resize_list - remove resized cq buffers from the resize_list
 * @iwcq: cq which owns the resize_list
 * @iwdev: irdma device
 * @lcqe_buf: the buffer where the last cqe is received
 */
int
irdma_process_resize_list(struct irdma_cq *iwcq,
			  struct irdma_device *iwdev,
			  struct irdma_cq_buf *lcqe_buf)
{
	struct list_head *tmp_node, *list_node;
	struct irdma_cq_buf *cq_buf;
	int cnt = 0;

	list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
		cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
		if (cq_buf == lcqe_buf)
			return cnt;

		list_del(&cq_buf->list);
		queue_work(iwdev->cleanup_wq, &cq_buf->work);
		cnt++;
	}

	return cnt;
}

/**
 * irdma_resize_cq - resize cq
 * @ibcq: cq to be resized
 * @entries: desired cq size
 * @udata: user data
 */
static int
irdma_resize_cq(struct ib_cq *ibcq, int entries,
		struct ib_udata *udata)
{
#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
	struct irdma_cq *iwcq = to_iwcq(ibcq);
	struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;
	struct irdma_modify_cq_info *m_info;
	struct irdma_modify_cq_info info = {0};
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_mr *cqmr_buf;
	struct irdma_pbl *iwpbl_buf;
	struct irdma_device *iwdev;
	struct irdma_pci_f *rf;
	struct irdma_cq_buf *cq_buf = NULL;
	unsigned long flags;
	int ret;

	iwdev = to_iwdev(ibcq->device);
	rf = iwdev->rf;

	if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
	      IRDMA_FEATURE_CQ_RESIZE))
		return -EOPNOTSUPP;

	if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
		return -EINVAL;

	if (entries > rf->max_cqe)
		return -EINVAL;

	if (!iwcq->user_mode) {
		entries++;
		if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
			entries *= 2;
	}

	info.cq_size = max(entries, 4);

	if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
		return 0;

	if (udata) {
		struct irdma_resize_cq_req req = {};
		struct irdma_ucontext *ucontext =
#if __FreeBSD_version >= 1400026
		    rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
#else
		    to_ucontext(ibcq->uobject->context);
#endif

		/* CQ resize not supported with legacy GEN_1 libi40iw */
		if (ucontext->legacy_mode)
			return -EOPNOTSUPP;

		if (ib_copy_from_udata(&req, udata,
				       min(sizeof(req), udata->inlen)))
			return -EINVAL;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
					  &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);

		if (!iwpbl_buf)
			return -ENOMEM;

		cqmr_buf = &iwpbl_buf->cq_mr;
		if (iwpbl_buf->pbl_allocated) {
			info.virtual_map = true;
			info.pbl_chunk_size = 1;
			info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
		} else {
			info.cq_pa = cqmr_buf->cq_pbl.addr;
		}
	} else {
		/* Kmode CQ resize */
		int rsize;

		rsize = info.cq_size * sizeof(struct irdma_cqe);
		kmem_buf.size = round_up(rsize, 256);
		kmem_buf.va = irdma_allocate_dma_mem(dev->hw, &kmem_buf,
						     kmem_buf.size, 256);
		if (!kmem_buf.va)
			return -ENOMEM;

		info.cq_base = kmem_buf.va;
		info.cq_pa = kmem_buf.pa;
		cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
		if (!cq_buf) {
			ret = -ENOMEM;
			goto error;
		}
	}

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
	if (!cqp_request) {
		ret = -ENOMEM;
		goto error;
	}

	info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
	info.cq_resize = true;

	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.cq_modify.info;
	memcpy(m_info, &info, sizeof(*m_info));

	cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
	cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
	cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
	cqp_info->post_sq = 1;
	ret = irdma_handle_cqp_op(rf, cqp_request);
	irdma_put_cqp_request(&rf->cqp, cqp_request);
	if (ret)
		goto error;

	spin_lock_irqsave(&iwcq->lock, flags);
	if (cq_buf) {
		cq_buf->kmem_buf = iwcq->kmem;
		cq_buf->hw = dev->hw;
		memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
		INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
		list_add_tail(&cq_buf->list, &iwcq->resize_list);
		iwcq->kmem = kmem_buf;
	}

	irdma_sc_cq_resize(&iwcq->sc_cq, &info);
	ibcq->cqe = info.cq_size - 1;
	spin_unlock_irqrestore(&iwcq->lock, flags);

	return 0;
error:
	if (!udata)
		irdma_free_dma_mem(dev->hw, &kmem_buf);
	kfree(cq_buf);

	return ret;
}

/**
 * irdma_get_mr_access - get hw MR access permissions from IB access flags
 * @access: IB access flags
 */
static inline u16
irdma_get_mr_access(int access)
{
	u16 hw_access = 0;

	hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
	    IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
	hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
	    IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
	hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
	    IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
	hw_access |= (access & IB_ACCESS_MW_BIND) ?
	    IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
	hw_access |= (access & IB_ZERO_BASED) ?
	    IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
	hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;

	return hw_access;
}

/**
 * irdma_free_stag - free stag resource
 * @iwdev: irdma device
 * @stag: stag to free
 */
void
irdma_free_stag(struct irdma_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
	irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
}

/**
 * irdma_create_stag - create random stag
 * @iwdev: irdma device
 */
u32
irdma_create_stag(struct irdma_device *iwdev)
{
	u32 stag;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->rf->mr_stagmask;
	next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
	next_stag_index %= iwdev->rf->max_mr;

	ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
			       iwdev->rf->max_mr, &stag_index,
			       &next_stag_index);
	if (ret)
		return 0;
	stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
	stag |= driver_key;
	stag += (u32)consumer_key;

	return stag;
}

/**
 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
 * @arr: lvl1 pbl array
 * @npages: page count
 * @pg_size: page size
 *
 */
static bool
irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
{
	u32 pg_idx;

	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
			return false;
	}

	return true;
}

/**
 * irdma_check_mr_contiguous - check if MR is physically contiguous
 * @palloc: pbl allocation struct
 * @pg_size: page size
 */
static bool
irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
			  u32 pg_size)
{
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *leaf = lvl2->leaf;
	u64 *arr = NULL;
	u64 *start_addr = NULL;
	int i;
	bool ret;

	if (palloc->level == PBLE_LEVEL_1) {
		arr = palloc->level1.addr;
		ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
						 pg_size);
		return ret;
	}

	start_addr = leaf->addr;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		arr = leaf->addr;
		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
			return false;
		ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
		if (!ret)
			return false;
	}

	return true;
}

/**
 * irdma_setup_pbles - copy user pg address to pble's
 * @rf: RDMA PCI function
 * @iwmr: mr pointer for this memory registration
 * @lvl: requested pble levels
 */
static int
irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
		  u8 lvl)
{
	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct irdma_pble_info *pinfo;
	u64 *pbl;
	int status;
	enum irdma_pble_level level = PBLE_LEVEL_1;

	if (lvl) {
		status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
					lvl);
		if (status)
			return status;

		iwpbl->pbl_allocated = true;
		level = palloc->level;
		pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
&palloc->level1 : 1896 palloc->level2.leaf; 1897 pbl = pinfo->addr; 1898 } else { 1899 pbl = iwmr->pgaddrmem; 1900 } 1901 1902 irdma_copy_user_pgaddrs(iwmr, pbl, level); 1903 1904 if (lvl) 1905 iwmr->pgaddrmem[0] = *pbl; 1906 1907 return 0; 1908 } 1909 1910 /** 1911 * irdma_handle_q_mem - handle memory for qp and cq 1912 * @iwdev: irdma device 1913 * @req: information for q memory management 1914 * @iwpbl: pble struct 1915 * @lvl: pble level mask 1916 */ 1917 static int 1918 irdma_handle_q_mem(struct irdma_device *iwdev, 1919 struct irdma_mem_reg_req *req, 1920 struct irdma_pbl *iwpbl, u8 lvl) 1921 { 1922 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 1923 struct irdma_mr *iwmr = iwpbl->iwmr; 1924 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr; 1925 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr; 1926 struct irdma_hmc_pble *hmc_p; 1927 u64 *arr = iwmr->pgaddrmem; 1928 u32 pg_size, total; 1929 int err = 0; 1930 bool ret = true; 1931 1932 pg_size = iwmr->page_size; 1933 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); 1934 if (err) 1935 return err; 1936 1937 if (lvl) 1938 arr = palloc->level1.addr; 1939 1940 switch (iwmr->type) { 1941 case IRDMA_MEMREG_TYPE_QP: 1942 total = req->sq_pages + req->rq_pages; 1943 hmc_p = &qpmr->sq_pbl; 1944 qpmr->shadow = (dma_addr_t) arr[total]; 1945 if (lvl) { 1946 ret = irdma_check_mem_contiguous(arr, req->sq_pages, 1947 pg_size); 1948 if (ret) 1949 ret = irdma_check_mem_contiguous(&arr[req->sq_pages], 1950 req->rq_pages, 1951 pg_size); 1952 } 1953 1954 if (!ret) { 1955 hmc_p->idx = palloc->level1.idx; 1956 hmc_p = &qpmr->rq_pbl; 1957 hmc_p->idx = palloc->level1.idx + req->sq_pages; 1958 } else { 1959 hmc_p->addr = arr[0]; 1960 hmc_p = &qpmr->rq_pbl; 1961 hmc_p->addr = arr[req->sq_pages]; 1962 } 1963 break; 1964 case IRDMA_MEMREG_TYPE_CQ: 1965 hmc_p = &cqmr->cq_pbl; 1966 1967 if (!cqmr->split) 1968 cqmr->shadow = (dma_addr_t) arr[req->cq_pages]; 1969 1970 if (lvl) 1971 ret = irdma_check_mem_contiguous(arr, req->cq_pages, 1972 pg_size); 1973 1974 if (!ret) 1975 hmc_p->idx = palloc->level1.idx; 1976 else 1977 hmc_p->addr = arr[0]; 1978 break; 1979 default: 1980 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "MR type error\n"); 1981 err = -EINVAL; 1982 } 1983 1984 if (lvl && ret) { 1985 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 1986 iwpbl->pbl_allocated = false; 1987 } 1988 1989 return err; 1990 } 1991 1992 /** 1993 * irdma_hw_alloc_mw - create the hw memory window 1994 * @iwdev: irdma device 1995 * @iwmr: pointer to memory window info 1996 */ 1997 int 1998 irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr) 1999 { 2000 struct irdma_mw_alloc_info *info; 2001 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); 2002 struct irdma_cqp_request *cqp_request; 2003 struct cqp_cmds_info *cqp_info; 2004 int status; 2005 2006 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2007 if (!cqp_request) 2008 return -ENOMEM; 2009 2010 cqp_info = &cqp_request->info; 2011 info = &cqp_info->in.u.mw_alloc.info; 2012 memset(info, 0, sizeof(*info)); 2013 if (iwmr->ibmw.type == IB_MW_TYPE_1) 2014 info->mw_wide = true; 2015 2016 info->page_size = PAGE_SIZE; 2017 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2018 info->pd_id = iwpd->sc_pd.pd_id; 2019 info->remote_access = true; 2020 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC; 2021 cqp_info->post_sq = 1; 2022 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev; 2023 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request; 2024 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2025 
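	/* release the CQP request whether or not the MW allocation succeeded */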
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2026 2027 return status; 2028 } 2029 2030 /** 2031 * irdma_dealloc_mw - Dealloc memory window 2032 * @ibmw: memory window structure. 2033 */ 2034 static int 2035 irdma_dealloc_mw(struct ib_mw *ibmw) 2036 { 2037 struct ib_pd *ibpd = ibmw->pd; 2038 struct irdma_pd *iwpd = to_iwpd(ibpd); 2039 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw); 2040 struct irdma_device *iwdev = to_iwdev(ibmw->device); 2041 struct irdma_cqp_request *cqp_request; 2042 struct cqp_cmds_info *cqp_info; 2043 struct irdma_dealloc_stag_info *info; 2044 2045 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2046 if (!cqp_request) 2047 return -ENOMEM; 2048 2049 cqp_info = &cqp_request->info; 2050 info = &cqp_info->in.u.dealloc_stag.info; 2051 memset(info, 0, sizeof(*info)); 2052 info->pd_id = iwpd->sc_pd.pd_id; 2053 info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S); 2054 info->mr = false; 2055 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; 2056 cqp_info->post_sq = 1; 2057 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; 2058 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; 2059 irdma_handle_cqp_op(iwdev->rf, cqp_request); 2060 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2061 irdma_free_stag(iwdev, iwmr->stag); 2062 kfree(iwmr); 2063 2064 return 0; 2065 } 2066 2067 /** 2068 * irdma_hw_alloc_stag - cqp command to allocate stag 2069 * @iwdev: irdma device 2070 * @iwmr: irdma mr pointer 2071 */ 2072 int 2073 irdma_hw_alloc_stag(struct irdma_device *iwdev, 2074 struct irdma_mr *iwmr) 2075 { 2076 struct irdma_allocate_stag_info *info; 2077 struct ib_pd *pd = iwmr->ibmr.pd; 2078 struct irdma_pd *iwpd = to_iwpd(pd); 2079 struct irdma_cqp_request *cqp_request; 2080 struct cqp_cmds_info *cqp_info; 2081 int status; 2082 2083 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2084 if (!cqp_request) 2085 return -ENOMEM; 2086 2087 cqp_info = &cqp_request->info; 2088 info = &cqp_info->in.u.alloc_stag.info; 2089 memset(info, 0, sizeof(*info)); 2090 info->page_size = PAGE_SIZE; 2091 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2092 info->pd_id = iwpd->sc_pd.pd_id; 2093 info->total_len = iwmr->len; 2094 info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? 
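	/*
	 * STags allocated on a PD that exposes the unsafe global rkey may
	 * cover all memory, so flag them for the HW.
	 */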
true : false; 2095 info->remote_access = true; 2096 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG; 2097 cqp_info->post_sq = 1; 2098 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev; 2099 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; 2100 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2101 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2102 if (!status) 2103 iwmr->is_hwreg = 1; 2104 2105 return status; 2106 } 2107 2108 /** 2109 * irdma_set_page - populate pbl list for fmr 2110 * @ibmr: ib mem to access iwarp mr pointer 2111 * @addr: page dma address fro pbl list 2112 */ 2113 static int 2114 irdma_set_page(struct ib_mr *ibmr, u64 addr) 2115 { 2116 struct irdma_mr *iwmr = to_iwmr(ibmr); 2117 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2118 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2119 u64 *pbl; 2120 2121 if (unlikely(iwmr->npages == iwmr->page_cnt)) 2122 return -ENOMEM; 2123 2124 if (palloc->level == PBLE_LEVEL_2) { 2125 struct irdma_pble_info *palloc_info = 2126 palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT); 2127 2128 palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr; 2129 } else { 2130 pbl = palloc->level1.addr; 2131 pbl[iwmr->npages] = addr; 2132 } 2133 2134 iwmr->npages++; 2135 return 0; 2136 } 2137 2138 /** 2139 * irdma_map_mr_sg - map of sg list for fmr 2140 * @ibmr: ib mem to access iwarp mr pointer 2141 * @sg: scatter gather list 2142 * @sg_nents: number of sg pages 2143 * @sg_offset: scatter gather list for fmr 2144 */ 2145 static int 2146 irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, 2147 int sg_nents, unsigned int *sg_offset) 2148 { 2149 struct irdma_mr *iwmr = to_iwmr(ibmr); 2150 2151 iwmr->npages = 0; 2152 2153 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page); 2154 } 2155 2156 /** 2157 * irdma_hwreg_mr - send cqp command for memory registration 2158 * @iwdev: irdma device 2159 * @iwmr: irdma mr pointer 2160 * @access: access for MR 2161 */ 2162 int 2163 irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr, 2164 u16 access) 2165 { 2166 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2167 struct irdma_reg_ns_stag_info *stag_info; 2168 struct ib_pd *pd = iwmr->ibmr.pd; 2169 struct irdma_pd *iwpd = to_iwpd(pd); 2170 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2171 struct irdma_cqp_request *cqp_request; 2172 struct cqp_cmds_info *cqp_info; 2173 int ret; 2174 2175 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2176 if (!cqp_request) 2177 return -ENOMEM; 2178 2179 cqp_info = &cqp_request->info; 2180 stag_info = &cqp_info->in.u.mr_reg_non_shared.info; 2181 memset(stag_info, 0, sizeof(*stag_info)); 2182 stag_info->va = iwpbl->user_base; 2183 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S; 2184 stag_info->stag_key = (u8)iwmr->stag; 2185 stag_info->total_len = iwmr->len; 2186 stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? 
true : false; 2187 stag_info->access_rights = irdma_get_mr_access(access); 2188 stag_info->pd_id = iwpd->sc_pd.pd_id; 2189 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED) 2190 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED; 2191 else 2192 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED; 2193 stag_info->page_size = iwmr->page_size; 2194 2195 if (iwpbl->pbl_allocated) { 2196 if (palloc->level == PBLE_LEVEL_1) { 2197 stag_info->first_pm_pbl_index = palloc->level1.idx; 2198 stag_info->chunk_size = 1; 2199 } else { 2200 stag_info->first_pm_pbl_index = palloc->level2.root.idx; 2201 stag_info->chunk_size = 3; 2202 } 2203 } else { 2204 stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; 2205 } 2206 2207 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED; 2208 cqp_info->post_sq = 1; 2209 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev; 2210 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; 2211 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2212 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2213 2214 if (!ret) 2215 iwmr->is_hwreg = 1; 2216 2217 return ret; 2218 } 2219 2220 /** 2221 * irdma_reg_user_mr - Register a user memory region 2222 * @pd: ptr of pd 2223 * @start: virtual start address 2224 * @len: length of mr 2225 * @virt: virtual address 2226 * @access: access of mr 2227 * @udata: user data 2228 */ 2229 static struct ib_mr * 2230 irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, 2231 u64 virt, int access, 2232 struct ib_udata *udata) 2233 { 2234 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages) 2235 struct irdma_device *iwdev = to_iwdev(pd->device); 2236 struct irdma_ucontext *ucontext; 2237 struct irdma_pble_alloc *palloc; 2238 struct irdma_pbl *iwpbl; 2239 struct irdma_mr *iwmr; 2240 struct ib_umem *region; 2241 struct irdma_mem_reg_req req = {}; 2242 u32 total, stag = 0; 2243 u8 shadow_pgcnt = 1; 2244 unsigned long flags; 2245 int err = -EINVAL; 2246 u8 lvl; 2247 int ret; 2248 2249 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 2250 return ERR_PTR(-EINVAL); 2251 2252 if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN) 2253 return ERR_PTR(-EINVAL); 2254 2255 region = ib_umem_get(pd->uobject->context, start, len, access, 0); 2256 2257 if (IS_ERR(region)) { 2258 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 2259 "Failed to create ib_umem region\n"); 2260 return (struct ib_mr *)region; 2261 } 2262 2263 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) { 2264 ib_umem_release(region); 2265 return ERR_PTR(-EFAULT); 2266 } 2267 2268 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 2269 if (!iwmr) { 2270 ib_umem_release(region); 2271 return ERR_PTR(-ENOMEM); 2272 } 2273 2274 iwpbl = &iwmr->iwpbl; 2275 iwpbl->iwmr = iwmr; 2276 iwmr->region = region; 2277 iwmr->ibmr.pd = pd; 2278 iwmr->ibmr.device = pd->device; 2279 iwmr->ibmr.iova = virt; 2280 iwmr->page_size = IRDMA_HW_PAGE_SIZE; 2281 iwmr->page_msk = ~(IRDMA_HW_PAGE_SIZE - 1); 2282 2283 iwmr->len = region->length; 2284 iwpbl->user_base = virt; 2285 palloc = &iwpbl->pble_alloc; 2286 iwmr->type = req.reg_type; 2287 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, virt); 2288 2289 switch (req.reg_type) { 2290 case IRDMA_MEMREG_TYPE_QP: 2291 total = req.sq_pages + req.rq_pages + shadow_pgcnt; 2292 if (total > iwmr->page_cnt) { 2293 err = -EINVAL; 2294 goto error; 2295 } 2296 total = req.sq_pages + req.rq_pages; 2297 lvl = total > 2 ? 
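		/*
		 * SQ and RQ spanning more than two pages in total need a
		 * PBLE-backed (level 1) translation; with level 0,
		 * irdma_handle_q_mem() uses the page addresses stored in
		 * pgaddrmem directly.
		 */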
PBLE_LEVEL_1 : PBLE_LEVEL_0; 2298 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); 2299 if (err) 2300 goto error; 2301 2302 #if __FreeBSD_version >= 1400026 2303 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); 2304 #else 2305 ucontext = to_ucontext(pd->uobject->context); 2306 #endif 2307 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 2308 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); 2309 iwpbl->on_list = true; 2310 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 2311 break; 2312 case IRDMA_MEMREG_TYPE_CQ: 2313 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) 2314 shadow_pgcnt = 0; 2315 total = req.cq_pages + shadow_pgcnt; 2316 if (total > iwmr->page_cnt) { 2317 err = -EINVAL; 2318 goto error; 2319 } 2320 2321 lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0; 2322 err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl); 2323 if (err) 2324 goto error; 2325 2326 #if __FreeBSD_version >= 1400026 2327 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); 2328 #else 2329 ucontext = to_ucontext(pd->uobject->context); 2330 #endif 2331 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2332 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); 2333 iwpbl->on_list = true; 2334 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2335 break; 2336 case IRDMA_MEMREG_TYPE_MEM: 2337 lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0; 2338 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); 2339 if (err) 2340 goto error; 2341 2342 if (lvl) { 2343 ret = irdma_check_mr_contiguous(palloc, 2344 iwmr->page_size); 2345 if (ret) { 2346 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2347 iwpbl->pbl_allocated = false; 2348 } 2349 } 2350 2351 stag = irdma_create_stag(iwdev); 2352 if (!stag) { 2353 err = -ENOMEM; 2354 goto error; 2355 } 2356 2357 iwmr->stag = stag; 2358 iwmr->ibmr.rkey = stag; 2359 iwmr->ibmr.lkey = stag; 2360 iwmr->access = access; 2361 err = irdma_hwreg_mr(iwdev, iwmr, access); 2362 if (err) { 2363 irdma_free_stag(iwdev, stag); 2364 goto error; 2365 } 2366 2367 break; 2368 default: 2369 goto error; 2370 } 2371 2372 iwmr->type = req.reg_type; 2373 2374 return &iwmr->ibmr; 2375 2376 error: 2377 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) 2378 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2379 ib_umem_release(region); 2380 kfree(iwmr); 2381 2382 return ERR_PTR(err); 2383 } 2384 2385 int 2386 irdma_hwdereg_mr(struct ib_mr *ib_mr) 2387 { 2388 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 2389 struct irdma_mr *iwmr = to_iwmr(ib_mr); 2390 struct irdma_pd *iwpd = to_iwpd(ib_mr->pd); 2391 struct irdma_dealloc_stag_info *info; 2392 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2393 struct irdma_cqp_request *cqp_request; 2394 struct cqp_cmds_info *cqp_info; 2395 int status; 2396 2397 /* 2398 * Skip HW MR de-register when it is already de-registered during an MR re-reregister and the re-registration 2399 * fails 2400 */ 2401 if (!iwmr->is_hwreg) 2402 return 0; 2403 2404 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 2405 if (!cqp_request) 2406 return -ENOMEM; 2407 2408 cqp_info = &cqp_request->info; 2409 info = &cqp_info->in.u.dealloc_stag.info; 2410 memset(info, 0, sizeof(*info)); 2411 info->pd_id = iwpd->sc_pd.pd_id; 2412 info->stag_idx = RS_64_1(ib_mr->rkey, IRDMA_CQPSQ_STAG_IDX_S); 2413 info->mr = true; 2414 if (iwpbl->pbl_allocated) 2415 info->dealloc_pbl = true; 2416 2417 cqp_info->cqp_cmd = 
IRDMA_OP_DEALLOC_STAG; 2418 cqp_info->post_sq = 1; 2419 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; 2420 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; 2421 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 2422 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 2423 2424 if (!status) 2425 iwmr->is_hwreg = 0; 2426 2427 return status; 2428 } 2429 2430 /* 2431 * irdma_rereg_mr_trans - Re-register a user MR for a change translation. @iwmr: ptr of iwmr @start: virtual start 2432 * address @len: length of mr @virt: virtual address 2433 * 2434 * Re-register a user memory region when a change translation is requested. Re-register a new region while reusing the 2435 * stag from the original registration. 2436 */ 2437 struct ib_mr * 2438 irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len, 2439 u64 virt, struct ib_udata *udata) 2440 { 2441 struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); 2442 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2443 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 2444 struct ib_pd *pd = iwmr->ibmr.pd; 2445 struct ib_umem *region; 2446 u8 lvl; 2447 int err; 2448 2449 region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0); 2450 2451 if (IS_ERR(region)) { 2452 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 2453 "Failed to create ib_umem region\n"); 2454 return (struct ib_mr *)region; 2455 } 2456 2457 iwmr->region = region; 2458 iwmr->ibmr.iova = virt; 2459 iwmr->ibmr.pd = pd; 2460 iwmr->page_size = PAGE_SIZE; 2461 2462 iwmr->len = region->length; 2463 iwpbl->user_base = virt; 2464 iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size, 2465 virt); 2466 2467 lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0; 2468 2469 err = irdma_setup_pbles(iwdev->rf, iwmr, lvl); 2470 if (err) 2471 goto error; 2472 2473 if (lvl) { 2474 err = irdma_check_mr_contiguous(palloc, 2475 iwmr->page_size); 2476 if (err) { 2477 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2478 iwpbl->pbl_allocated = false; 2479 } 2480 } 2481 2482 err = irdma_hwreg_mr(iwdev, iwmr, iwmr->access); 2483 if (err) 2484 goto error; 2485 2486 return &iwmr->ibmr; 2487 2488 error: 2489 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) { 2490 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 2491 iwpbl->pbl_allocated = false; 2492 } 2493 ib_umem_release(region); 2494 iwmr->region = NULL; 2495 2496 return ERR_PTR(err); 2497 } 2498 2499 /** 2500 * irdma_reg_phys_mr - register kernel physical memory 2501 * @pd: ibpd pointer 2502 * @addr: physical address of memory to register 2503 * @size: size of memory to register 2504 * @access: Access rights 2505 * @iova_start: start of virtual address for physical buffers 2506 */ 2507 struct ib_mr * 2508 irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, 2509 u64 *iova_start) 2510 { 2511 struct irdma_device *iwdev = to_iwdev(pd->device); 2512 struct irdma_pbl *iwpbl; 2513 struct irdma_mr *iwmr; 2514 u32 stag; 2515 int ret; 2516 2517 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 2518 if (!iwmr) 2519 return ERR_PTR(-ENOMEM); 2520 2521 iwmr->ibmr.pd = pd; 2522 iwmr->ibmr.device = pd->device; 2523 iwpbl = &iwmr->iwpbl; 2524 iwpbl->iwmr = iwmr; 2525 iwmr->type = IRDMA_MEMREG_TYPE_MEM; 2526 iwpbl->user_base = *iova_start; 2527 stag = irdma_create_stag(iwdev); 2528 if (!stag) { 2529 ret = -ENOMEM; 2530 goto err; 2531 } 2532 2533 iwmr->stag = stag; 2534 iwmr->ibmr.iova = *iova_start; 2535 iwmr->ibmr.rkey = stag; 2536 iwmr->ibmr.lkey = stag; 2537 iwmr->page_cnt = 1; 2538 
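	/*
	 * A physical registration describes a single contiguous buffer, so
	 * no PBLE allocation is needed; the one page address kept inline
	 * here is programmed as reg_addr_pa by irdma_hwreg_mr().
	 */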
iwmr->pgaddrmem[0] = addr; 2539 iwmr->len = size; 2540 iwmr->page_size = SZ_4K; 2541 ret = irdma_hwreg_mr(iwdev, iwmr, access); 2542 if (ret) { 2543 irdma_free_stag(iwdev, stag); 2544 goto err; 2545 } 2546 2547 return &iwmr->ibmr; 2548 2549 err: 2550 kfree(iwmr); 2551 2552 return ERR_PTR(ret); 2553 } 2554 2555 /** 2556 * irdma_get_dma_mr - register physical mem 2557 * @pd: ptr of pd 2558 * @acc: access for memory 2559 */ 2560 static struct ib_mr * 2561 irdma_get_dma_mr(struct ib_pd *pd, int acc) 2562 { 2563 u64 kva = 0; 2564 2565 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva); 2566 } 2567 2568 /** 2569 * irdma_del_memlist - Deleting pbl list entries for CQ/QP 2570 * @iwmr: iwmr for IB's user page addresses 2571 * @ucontext: ptr to user context 2572 */ 2573 void 2574 irdma_del_memlist(struct irdma_mr *iwmr, 2575 struct irdma_ucontext *ucontext) 2576 { 2577 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 2578 unsigned long flags; 2579 2580 switch (iwmr->type) { 2581 case IRDMA_MEMREG_TYPE_CQ: 2582 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2583 if (iwpbl->on_list) { 2584 iwpbl->on_list = false; 2585 list_del(&iwpbl->list); 2586 } 2587 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2588 break; 2589 case IRDMA_MEMREG_TYPE_QP: 2590 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 2591 if (iwpbl->on_list) { 2592 iwpbl->on_list = false; 2593 list_del(&iwpbl->list); 2594 } 2595 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 2596 break; 2597 default: 2598 break; 2599 } 2600 } 2601 2602 /** 2603 * irdma_copy_sg_list - copy sg list for qp 2604 * @sg_list: copied into sg_list 2605 * @sgl: copy from sgl 2606 * @num_sges: count of sg entries 2607 */ 2608 static void 2609 irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl, 2610 int num_sges) 2611 { 2612 unsigned int i; 2613 2614 for (i = 0; i < num_sges; i++) { 2615 sg_list[i].tag_off = sgl[i].addr; 2616 sg_list[i].len = sgl[i].length; 2617 sg_list[i].stag = sgl[i].lkey; 2618 } 2619 } 2620 2621 /** 2622 * irdma_post_send - kernel application wr 2623 * @ibqp: qp ptr for wr 2624 * @ib_wr: work request ptr 2625 * @bad_wr: return of bad wr if err 2626 */ 2627 static int 2628 irdma_post_send(struct ib_qp *ibqp, 2629 const struct ib_send_wr *ib_wr, 2630 const struct ib_send_wr **bad_wr) 2631 { 2632 struct irdma_qp *iwqp; 2633 struct irdma_qp_uk *ukqp; 2634 struct irdma_sc_dev *dev; 2635 struct irdma_post_sq_info info; 2636 int err = 0; 2637 unsigned long flags; 2638 bool inv_stag; 2639 struct irdma_ah *ah; 2640 2641 iwqp = to_iwqp(ibqp); 2642 ukqp = &iwqp->sc_qp.qp_uk; 2643 dev = &iwqp->iwdev->rf->sc_dev; 2644 2645 spin_lock_irqsave(&iwqp->lock, flags); 2646 while (ib_wr) { 2647 memset(&info, 0, sizeof(info)); 2648 inv_stag = false; 2649 info.wr_id = (ib_wr->wr_id); 2650 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) 2651 info.signaled = true; 2652 if (ib_wr->send_flags & IB_SEND_FENCE) 2653 info.read_fence = true; 2654 switch (ib_wr->opcode) { 2655 case IB_WR_SEND_WITH_IMM: 2656 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) { 2657 info.imm_data_valid = true; 2658 info.imm_data = ntohl(ib_wr->ex.imm_data); 2659 } else { 2660 err = -EINVAL; 2661 break; 2662 } 2663 /* fallthrough */ 2664 case IB_WR_SEND: 2665 case IB_WR_SEND_WITH_INV: 2666 if (ib_wr->opcode == IB_WR_SEND || 2667 ib_wr->opcode == IB_WR_SEND_WITH_IMM) { 2668 if (ib_wr->send_flags & IB_SEND_SOLICITED) 2669 info.op_type = IRDMA_OP_TYPE_SEND_SOL; 2670 else 2671 info.op_type = IRDMA_OP_TYPE_SEND; 2672 } else { 2673 if 
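				/*
				 * IB_SEND_SOLICITED asks the responder to
				 * raise a completion event on a CQ armed for
				 * solicited-only notification, hence the
				 * *_SOL opcode variants.
				 */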
(ib_wr->send_flags & IB_SEND_SOLICITED) 2674 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV; 2675 else 2676 info.op_type = IRDMA_OP_TYPE_SEND_INV; 2677 info.stag_to_inv = ib_wr->ex.invalidate_rkey; 2678 } 2679 2680 info.op.send.num_sges = ib_wr->num_sge; 2681 info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list; 2682 if (iwqp->ibqp.qp_type == IB_QPT_UD || 2683 iwqp->ibqp.qp_type == IB_QPT_GSI) { 2684 ah = to_iwah(ud_wr(ib_wr)->ah); 2685 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx; 2686 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey; 2687 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn; 2688 } 2689 2690 if (ib_wr->send_flags & IB_SEND_INLINE) 2691 err = irdma_uk_inline_send(ukqp, &info, false); 2692 else 2693 err = irdma_uk_send(ukqp, &info, false); 2694 break; 2695 case IB_WR_RDMA_WRITE_WITH_IMM: 2696 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) { 2697 info.imm_data_valid = true; 2698 info.imm_data = ntohl(ib_wr->ex.imm_data); 2699 } else { 2700 err = -EINVAL; 2701 break; 2702 } 2703 /* fallthrough */ 2704 case IB_WR_RDMA_WRITE: 2705 if (ib_wr->send_flags & IB_SEND_SOLICITED) 2706 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL; 2707 else 2708 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE; 2709 2710 info.op.rdma_write.num_lo_sges = ib_wr->num_sge; 2711 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list; 2712 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; 2713 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; 2714 if (ib_wr->send_flags & IB_SEND_INLINE) 2715 err = irdma_uk_inline_rdma_write(ukqp, &info, false); 2716 else 2717 err = irdma_uk_rdma_write(ukqp, &info, false); 2718 break; 2719 case IB_WR_RDMA_READ_WITH_INV: 2720 inv_stag = true; 2721 /* fallthrough */ 2722 case IB_WR_RDMA_READ: 2723 if (ib_wr->num_sge > 2724 dev->hw_attrs.uk_attrs.max_hw_read_sges) { 2725 err = -EINVAL; 2726 break; 2727 } 2728 info.op_type = IRDMA_OP_TYPE_RDMA_READ; 2729 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; 2730 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey; 2731 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list; 2732 info.op.rdma_read.num_lo_sges = ib_wr->num_sge; 2733 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false); 2734 break; 2735 case IB_WR_LOCAL_INV: 2736 info.op_type = IRDMA_OP_TYPE_INV_STAG; 2737 info.local_fence = info.read_fence; 2738 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; 2739 err = irdma_uk_stag_local_invalidate(ukqp, &info, true); 2740 break; 2741 case IB_WR_REG_MR:{ 2742 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); 2743 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; 2744 struct irdma_fast_reg_stag_info stag_info = {0}; 2745 2746 stag_info.signaled = info.signaled; 2747 stag_info.read_fence = info.read_fence; 2748 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access); 2749 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff; 2750 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8; 2751 stag_info.page_size = reg_wr(ib_wr)->mr->page_size; 2752 stag_info.wr_id = ib_wr->wr_id; 2753 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED; 2754 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova; 2755 stag_info.total_len = iwmr->ibmr.length; 2756 if (palloc->level == PBLE_LEVEL_2) { 2757 stag_info.chunk_size = 3; 2758 stag_info.first_pm_pbl_index = palloc->level2.root.idx; 2759 } else { 2760 stag_info.chunk_size = 1; 2761 stag_info.first_pm_pbl_index = palloc->level1.idx; 2762 } 2763 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; 2764 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, 
&stag_info, 2765 true); 2766 break; 2767 } 2768 default: 2769 err = -EINVAL; 2770 irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 2771 "upost_send bad opcode = 0x%x\n", 2772 ib_wr->opcode); 2773 break; 2774 } 2775 2776 if (err) 2777 break; 2778 ib_wr = ib_wr->next; 2779 } 2780 2781 if (!iwqp->flush_issued) { 2782 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) 2783 irdma_uk_qp_post_wr(ukqp); 2784 spin_unlock_irqrestore(&iwqp->lock, flags); 2785 } else { 2786 spin_unlock_irqrestore(&iwqp->lock, flags); 2787 irdma_sched_qp_flush_work(iwqp); 2788 } 2789 2790 if (err) 2791 *bad_wr = ib_wr; 2792 2793 return err; 2794 } 2795 2796 /** 2797 * irdma_post_recv - post receive wr for kernel application 2798 * @ibqp: ib qp pointer 2799 * @ib_wr: work request for receive 2800 * @bad_wr: bad wr caused an error 2801 */ 2802 static int 2803 irdma_post_recv(struct ib_qp *ibqp, 2804 const struct ib_recv_wr *ib_wr, 2805 const struct ib_recv_wr **bad_wr) 2806 { 2807 struct irdma_qp *iwqp = to_iwqp(ibqp); 2808 struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk; 2809 struct irdma_post_rq_info post_recv = {0}; 2810 struct irdma_sge *sg_list = iwqp->sg_list; 2811 unsigned long flags; 2812 int err = 0; 2813 2814 spin_lock_irqsave(&iwqp->lock, flags); 2815 2816 while (ib_wr) { 2817 if (ib_wr->num_sge > ukqp->max_rq_frag_cnt) { 2818 err = -EINVAL; 2819 goto out; 2820 } 2821 post_recv.num_sges = ib_wr->num_sge; 2822 post_recv.wr_id = ib_wr->wr_id; 2823 irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge); 2824 post_recv.sg_list = sg_list; 2825 err = irdma_uk_post_receive(ukqp, &post_recv); 2826 if (err) { 2827 irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 2828 "post_recv err %d\n", 2829 err); 2830 goto out; 2831 } 2832 2833 ib_wr = ib_wr->next; 2834 } 2835 2836 out: 2837 spin_unlock_irqrestore(&iwqp->lock, flags); 2838 if (iwqp->flush_issued) 2839 irdma_sched_qp_flush_work(iwqp); 2840 2841 if (err) 2842 *bad_wr = ib_wr; 2843 2844 return err; 2845 } 2846 2847 /** 2848 * irdma_flush_err_to_ib_wc_status - return change flush error code to IB status 2849 * @opcode: iwarp flush code 2850 */ 2851 static enum ib_wc_status 2852 irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode) 2853 { 2854 switch (opcode) { 2855 case FLUSH_PROT_ERR: 2856 return IB_WC_LOC_PROT_ERR; 2857 case FLUSH_REM_ACCESS_ERR: 2858 return IB_WC_REM_ACCESS_ERR; 2859 case FLUSH_LOC_QP_OP_ERR: 2860 return IB_WC_LOC_QP_OP_ERR; 2861 case FLUSH_REM_OP_ERR: 2862 return IB_WC_REM_OP_ERR; 2863 case FLUSH_LOC_LEN_ERR: 2864 return IB_WC_LOC_LEN_ERR; 2865 case FLUSH_GENERAL_ERR: 2866 return IB_WC_WR_FLUSH_ERR; 2867 case FLUSH_MW_BIND_ERR: 2868 return IB_WC_MW_BIND_ERR; 2869 case FLUSH_REM_INV_REQ_ERR: 2870 return IB_WC_REM_INV_REQ_ERR; 2871 case FLUSH_RETRY_EXC_ERR: 2872 return IB_WC_RETRY_EXC_ERR; 2873 case FLUSH_FATAL_ERR: 2874 default: 2875 return IB_WC_FATAL_ERR; 2876 } 2877 } 2878 2879 static inline void 2880 set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info, 2881 struct ib_wc *entry) 2882 { 2883 struct irdma_sc_qp *qp; 2884 2885 switch (cq_poll_info->op_type) { 2886 case IRDMA_OP_TYPE_RDMA_WRITE: 2887 case IRDMA_OP_TYPE_RDMA_WRITE_SOL: 2888 entry->opcode = IB_WC_RDMA_WRITE; 2889 break; 2890 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG: 2891 case IRDMA_OP_TYPE_RDMA_READ: 2892 entry->opcode = IB_WC_RDMA_READ; 2893 break; 2894 case IRDMA_OP_TYPE_SEND_SOL: 2895 case IRDMA_OP_TYPE_SEND_SOL_INV: 2896 case IRDMA_OP_TYPE_SEND_INV: 2897 case IRDMA_OP_TYPE_SEND: 2898 entry->opcode = IB_WC_SEND; 2899 break; 2900 case 
IRDMA_OP_TYPE_FAST_REG_NSMR: 2901 entry->opcode = IB_WC_REG_MR; 2902 break; 2903 case IRDMA_OP_TYPE_INV_STAG: 2904 entry->opcode = IB_WC_LOCAL_INV; 2905 break; 2906 default: 2907 qp = cq_poll_info->qp_handle; 2908 irdma_dev_err(to_ibdev(qp->dev), "Invalid opcode = %d in CQE\n", 2909 cq_poll_info->op_type); 2910 entry->status = IB_WC_GENERAL_ERR; 2911 } 2912 } 2913 2914 static inline void 2915 set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info, 2916 struct ib_wc *entry, bool send_imm_support) 2917 { 2918 /** 2919 * iWARP does not support sendImm, so the presence of Imm data 2920 * must be WriteImm. 2921 */ 2922 if (!send_imm_support) { 2923 entry->opcode = cq_poll_info->imm_valid ? 2924 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV; 2925 return; 2926 } 2927 switch (cq_poll_info->op_type) { 2928 case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: 2929 case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: 2930 entry->opcode = IB_WC_RECV_RDMA_WITH_IMM; 2931 break; 2932 default: 2933 entry->opcode = IB_WC_RECV; 2934 } 2935 } 2936 2937 /** 2938 * irdma_process_cqe - process cqe info 2939 * @entry: processed cqe 2940 * @cq_poll_info: cqe info 2941 */ 2942 static void 2943 irdma_process_cqe(struct ib_wc *entry, 2944 struct irdma_cq_poll_info *cq_poll_info) 2945 { 2946 struct irdma_sc_qp *qp; 2947 2948 entry->wc_flags = 0; 2949 entry->pkey_index = 0; 2950 entry->wr_id = cq_poll_info->wr_id; 2951 2952 qp = cq_poll_info->qp_handle; 2953 entry->qp = qp->qp_uk.back_qp; 2954 2955 if (cq_poll_info->error) { 2956 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ? 2957 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR; 2958 2959 entry->vendor_err = cq_poll_info->major_err << 16 | 2960 cq_poll_info->minor_err; 2961 } else { 2962 entry->status = IB_WC_SUCCESS; 2963 if (cq_poll_info->imm_valid) { 2964 entry->ex.imm_data = htonl(cq_poll_info->imm_data); 2965 entry->wc_flags |= IB_WC_WITH_IMM; 2966 } 2967 if (cq_poll_info->ud_smac_valid) { 2968 ether_addr_copy(entry->smac, cq_poll_info->ud_smac); 2969 entry->wc_flags |= IB_WC_WITH_SMAC; 2970 } 2971 2972 if (cq_poll_info->ud_vlan_valid) { 2973 u16 vlan = cq_poll_info->ud_vlan & EVL_VLID_MASK; 2974 2975 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT; 2976 if (vlan) { 2977 entry->vlan_id = vlan; 2978 entry->wc_flags |= IB_WC_WITH_VLAN; 2979 } 2980 } else { 2981 entry->sl = 0; 2982 } 2983 } 2984 2985 if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) { 2986 set_ib_wc_op_sq(cq_poll_info, entry); 2987 } else { 2988 set_ib_wc_op_rq(cq_poll_info, entry, 2989 qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ? 2990 true : false); 2991 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD && 2992 cq_poll_info->stag_invalid_set) { 2993 entry->ex.invalidate_rkey = cq_poll_info->inv_stag; 2994 entry->wc_flags |= IB_WC_WITH_INVALIDATE; 2995 } 2996 } 2997 2998 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) { 2999 entry->src_qp = cq_poll_info->ud_src_qpn; 3000 entry->slid = 0; 3001 entry->wc_flags |= 3002 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE); 3003 entry->network_hdr_type = cq_poll_info->ipv4 ? 
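		/*
		 * UD completions report a GRH; network_hdr_type tells the
		 * consumer whether the packet was carried over IPv4 or IPv6.
		 */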
3004 RDMA_NETWORK_IPV4 : 3005 RDMA_NETWORK_IPV6; 3006 } else { 3007 entry->src_qp = cq_poll_info->qp_id; 3008 } 3009 3010 entry->byte_len = cq_poll_info->bytes_xfered; 3011 } 3012 3013 /** 3014 * irdma_poll_one - poll one entry of the CQ 3015 * @ukcq: ukcq to poll 3016 * @cur_cqe: current CQE info to be filled in 3017 * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ 3018 * 3019 * Returns the internal irdma device error code or 0 on success 3020 */ 3021 static inline int 3022 irdma_poll_one(struct irdma_cq_uk *ukcq, 3023 struct irdma_cq_poll_info *cur_cqe, 3024 struct ib_wc *entry) 3025 { 3026 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe); 3027 3028 if (ret) 3029 return ret; 3030 3031 irdma_process_cqe(entry, cur_cqe); 3032 3033 return 0; 3034 } 3035 3036 /** 3037 * __irdma_poll_cq - poll cq for completion (kernel apps) 3038 * @iwcq: cq to poll 3039 * @num_entries: number of entries to poll 3040 * @entry: wr of a completed entry 3041 */ 3042 static int 3043 __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry) 3044 { 3045 struct list_head *tmp_node, *list_node; 3046 struct irdma_cq_buf *last_buf = NULL; 3047 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe; 3048 struct irdma_cq_buf *cq_buf; 3049 int ret; 3050 struct irdma_device *iwdev; 3051 struct irdma_cq_uk *ukcq; 3052 bool cq_new_cqe = false; 3053 int resized_bufs = 0; 3054 int npolled = 0; 3055 3056 iwdev = to_iwdev(iwcq->ibcq.device); 3057 ukcq = &iwcq->sc_cq.cq_uk; 3058 3059 /* go through the list of previously resized CQ buffers */ 3060 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) { 3061 cq_buf = container_of(list_node, struct irdma_cq_buf, list); 3062 while (npolled < num_entries) { 3063 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled); 3064 if (!ret) { 3065 ++npolled; 3066 cq_new_cqe = true; 3067 continue; 3068 } 3069 if (ret == -ENOENT) 3070 break; 3071 /* QP using the CQ is destroyed. Skip reporting this CQE */ 3072 if (ret == -EFAULT) { 3073 cq_new_cqe = true; 3074 continue; 3075 } 3076 goto error; 3077 } 3078 3079 /* save the resized CQ buffer which received the last cqe */ 3080 if (cq_new_cqe) 3081 last_buf = cq_buf; 3082 cq_new_cqe = false; 3083 } 3084 3085 /* check the current CQ for new cqes */ 3086 while (npolled < num_entries) { 3087 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled); 3088 if (ret == -ENOENT) { 3089 ret = irdma_generated_cmpls(iwcq, cur_cqe); 3090 if (!ret) 3091 irdma_process_cqe(entry + npolled, cur_cqe); 3092 } 3093 if (!ret) { 3094 ++npolled; 3095 cq_new_cqe = true; 3096 continue; 3097 } 3098 3099 if (ret == -ENOENT) 3100 break; 3101 /* QP using the CQ is destroyed. 
Skip reporting this CQE */ 3102 if (ret == -EFAULT) { 3103 cq_new_cqe = true; 3104 continue; 3105 } 3106 goto error; 3107 } 3108 3109 if (cq_new_cqe) 3110 /* all previous CQ resizes are complete */ 3111 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL); 3112 else if (last_buf) 3113 /* only CQ resizes up to the last_buf are complete */ 3114 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf); 3115 if (resized_bufs) 3116 /* report to the HW the number of complete CQ resizes */ 3117 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs); 3118 3119 return npolled; 3120 error: 3121 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 3122 "%s: Error polling CQ, irdma_err: %d\n", 3123 __func__, ret); 3124 3125 return ret; 3126 } 3127 3128 /** 3129 * irdma_poll_cq - poll cq for completion (kernel apps) 3130 * @ibcq: cq to poll 3131 * @num_entries: number of entries to poll 3132 * @entry: wr of a completed entry 3133 */ 3134 static int 3135 irdma_poll_cq(struct ib_cq *ibcq, int num_entries, 3136 struct ib_wc *entry) 3137 { 3138 struct irdma_cq *iwcq; 3139 unsigned long flags; 3140 int ret; 3141 3142 iwcq = to_iwcq(ibcq); 3143 3144 spin_lock_irqsave(&iwcq->lock, flags); 3145 ret = __irdma_poll_cq(iwcq, num_entries, entry); 3146 spin_unlock_irqrestore(&iwcq->lock, flags); 3147 3148 return ret; 3149 } 3150 3151 /** 3152 * irdma_req_notify_cq - arm cq kernel application 3153 * @ibcq: cq to arm 3154 * @notify_flags: notofication flags 3155 */ 3156 static int 3157 irdma_req_notify_cq(struct ib_cq *ibcq, 3158 enum ib_cq_notify_flags notify_flags) 3159 { 3160 struct irdma_cq *iwcq; 3161 struct irdma_cq_uk *ukcq; 3162 unsigned long flags; 3163 enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT; 3164 bool promo_event = false; 3165 int ret = 0; 3166 3167 iwcq = to_iwcq(ibcq); 3168 ukcq = &iwcq->sc_cq.cq_uk; 3169 3170 spin_lock_irqsave(&iwcq->lock, flags); 3171 if (notify_flags == IB_CQ_SOLICITED) { 3172 cq_notify = IRDMA_CQ_COMPL_SOLICITED; 3173 } else { 3174 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED) 3175 promo_event = true; 3176 } 3177 3178 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) { 3179 iwcq->last_notify = cq_notify; 3180 irdma_uk_cq_request_notification(ukcq, cq_notify); 3181 } 3182 3183 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && 3184 (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated))) 3185 ret = 1; 3186 spin_unlock_irqrestore(&iwcq->lock, flags); 3187 3188 return ret; 3189 } 3190 3191 /** 3192 * mcast_list_add - Add a new mcast item to list 3193 * @rf: RDMA PCI function 3194 * @new_elem: pointer to element to add 3195 */ 3196 static void 3197 mcast_list_add(struct irdma_pci_f *rf, 3198 struct mc_table_list *new_elem) 3199 { 3200 list_add(&new_elem->list, &rf->mc_qht_list.list); 3201 } 3202 3203 /** 3204 * mcast_list_del - Remove an mcast item from list 3205 * @mc_qht_elem: pointer to mcast table list element 3206 */ 3207 static void 3208 mcast_list_del(struct mc_table_list *mc_qht_elem) 3209 { 3210 if (mc_qht_elem) 3211 list_del(&mc_qht_elem->list); 3212 } 3213 3214 /** 3215 * mcast_list_lookup_ip - Search mcast list for address 3216 * @rf: RDMA PCI function 3217 * @ip_mcast: pointer to mcast IP address 3218 */ 3219 static struct mc_table_list * 3220 mcast_list_lookup_ip(struct irdma_pci_f *rf, 3221 u32 *ip_mcast) 3222 { 3223 struct mc_table_list *mc_qht_el; 3224 struct list_head *pos, *q; 3225 3226 list_for_each_safe(pos, q, &rf->mc_qht_list.list) { 3227 mc_qht_el = list_entry(pos, struct mc_table_list, list); 3228 if 
(!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast, 3229 sizeof(mc_qht_el->mc_info.dest_ip))) 3230 return mc_qht_el; 3231 } 3232 3233 return NULL; 3234 } 3235 3236 /** 3237 * irdma_mcast_cqp_op - perform a mcast cqp operation 3238 * @iwdev: irdma device 3239 * @mc_grp_ctx: mcast group info 3240 * @op: operation 3241 * 3242 * returns error status 3243 */ 3244 static int 3245 irdma_mcast_cqp_op(struct irdma_device *iwdev, 3246 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op) 3247 { 3248 struct cqp_cmds_info *cqp_info; 3249 struct irdma_cqp_request *cqp_request; 3250 int status; 3251 3252 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); 3253 if (!cqp_request) 3254 return -ENOMEM; 3255 3256 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx; 3257 cqp_info = &cqp_request->info; 3258 cqp_info->cqp_cmd = op; 3259 cqp_info->post_sq = 1; 3260 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request; 3261 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp; 3262 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); 3263 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); 3264 3265 return status; 3266 } 3267 3268 /** 3269 * irdma_attach_mcast - attach a qp to a multicast group 3270 * @ibqp: ptr to qp 3271 * @ibgid: pointer to global ID 3272 * @lid: local ID 3273 * 3274 * returns error status 3275 */ 3276 static int 3277 irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) 3278 { 3279 struct irdma_qp *iwqp = to_iwqp(ibqp); 3280 struct irdma_device *iwdev = iwqp->iwdev; 3281 struct irdma_pci_f *rf = iwdev->rf; 3282 struct mc_table_list *mc_qht_elem; 3283 struct irdma_mcast_grp_ctx_entry_info mcg_info = {0}; 3284 unsigned long flags; 3285 u32 ip_addr[4] = {0}; 3286 u32 mgn; 3287 u32 no_mgs; 3288 int ret = 0; 3289 bool ipv4; 3290 u16 vlan_id; 3291 union { 3292 struct sockaddr saddr; 3293 struct sockaddr_in saddr_in; 3294 struct sockaddr_in6 saddr_in6; 3295 } sgid_addr; 3296 unsigned char dmac[ETH_ALEN]; 3297 3298 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); 3299 3300 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) { 3301 irdma_copy_ip_ntohl(ip_addr, 3302 sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32); 3303 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL); 3304 ipv4 = false; 3305 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 3306 "qp_id=%d, IP6address=%pI6\n", 3307 ibqp->qp_num, 3308 ip_addr); 3309 irdma_mcast_mac_v6(ip_addr, dmac); 3310 } else { 3311 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 3312 ipv4 = true; 3313 vlan_id = irdma_get_vlan_ipv4(ip_addr); 3314 irdma_mcast_mac_v4(ip_addr, dmac); 3315 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 3316 "qp_id=%d, IP4address=%pI4, MAC=%pM\n", 3317 ibqp->qp_num, ip_addr, dmac); 3318 } 3319 3320 spin_lock_irqsave(&rf->qh_list_lock, flags); 3321 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); 3322 if (!mc_qht_elem) { 3323 struct irdma_dma_mem *dma_mem_mc; 3324 3325 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 3326 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL); 3327 if (!mc_qht_elem) 3328 return -ENOMEM; 3329 3330 mc_qht_elem->mc_info.ipv4_valid = ipv4; 3331 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr, 3332 sizeof(mc_qht_elem->mc_info.dest_ip)); 3333 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg, 3334 &mgn, &rf->next_mcg); 3335 if (ret) { 3336 kfree(mc_qht_elem); 3337 return -ENOMEM; 3338 } 3339 3340 mc_qht_elem->mc_info.mgn = mgn; 3341 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc; 3342 dma_mem_mc->size = sizeof(u64)* IRDMA_MAX_MGS_PER_CTX; 3343 
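		/*
		 * The group context is sized for IRDMA_MAX_MGS_PER_CTX u64
		 * member entries and must live in DMA-able memory aligned to
		 * the HW page size.
		 */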
dma_mem_mc->va = irdma_allocate_dma_mem(&rf->hw, dma_mem_mc, 3344 dma_mem_mc->size, 3345 IRDMA_HW_PAGE_SIZE); 3346 if (!dma_mem_mc->va) { 3347 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn); 3348 kfree(mc_qht_elem); 3349 return -ENOMEM; 3350 } 3351 3352 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn; 3353 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr, 3354 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr)); 3355 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4; 3356 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id; 3357 if (vlan_id < VLAN_N_VID) 3358 mc_qht_elem->mc_grp_ctx.vlan_valid = true; 3359 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id; 3360 mc_qht_elem->mc_grp_ctx.qs_handle = 3361 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle; 3362 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac); 3363 3364 spin_lock_irqsave(&rf->qh_list_lock, flags); 3365 mcast_list_add(rf, mc_qht_elem); 3366 } else { 3367 if (mc_qht_elem->mc_grp_ctx.no_of_mgs == 3368 IRDMA_MAX_MGS_PER_CTX) { 3369 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 3370 return -ENOMEM; 3371 } 3372 } 3373 3374 mcg_info.qp_id = iwqp->ibqp.qp_num; 3375 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs; 3376 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 3377 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 3378 3379 /* Only if there is a change do we need to modify or create */ 3380 if (!no_mgs) { 3381 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 3382 IRDMA_OP_MC_CREATE); 3383 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) { 3384 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 3385 IRDMA_OP_MC_MODIFY); 3386 } else { 3387 return 0; 3388 } 3389 3390 if (ret) 3391 goto error; 3392 3393 return 0; 3394 3395 error: 3396 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 3397 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) { 3398 mcast_list_del(mc_qht_elem); 3399 irdma_free_dma_mem(&rf->hw, 3400 &mc_qht_elem->mc_grp_ctx.dma_mem_mc); 3401 irdma_free_rsrc(rf, rf->allocated_mcgs, 3402 mc_qht_elem->mc_grp_ctx.mg_id); 3403 kfree(mc_qht_elem); 3404 } 3405 3406 return ret; 3407 } 3408 3409 /** 3410 * irdma_detach_mcast - detach a qp from a multicast group 3411 * @ibqp: ptr to qp 3412 * @ibgid: pointer to global ID 3413 * @lid: local ID 3414 * 3415 * returns error status 3416 */ 3417 static int 3418 irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) 3419 { 3420 struct irdma_qp *iwqp = to_iwqp(ibqp); 3421 struct irdma_device *iwdev = iwqp->iwdev; 3422 struct irdma_pci_f *rf = iwdev->rf; 3423 u32 ip_addr[4] = {0}; 3424 struct mc_table_list *mc_qht_elem; 3425 struct irdma_mcast_grp_ctx_entry_info mcg_info = {0}; 3426 int ret; 3427 unsigned long flags; 3428 union { 3429 struct sockaddr saddr; 3430 struct sockaddr_in saddr_in; 3431 struct sockaddr_in6 saddr_in6; 3432 } sgid_addr; 3433 3434 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid); 3435 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) 3436 irdma_copy_ip_ntohl(ip_addr, 3437 sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32); 3438 else 3439 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr); 3440 3441 spin_lock_irqsave(&rf->qh_list_lock, flags); 3442 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr); 3443 if (!mc_qht_elem) { 3444 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 3445 irdma_debug(&iwdev->rf->sc_dev, 3446 IRDMA_DEBUG_VERBS, "address not found MCG\n"); 3447 return 0; 3448 } 3449 3450 mcg_info.qp_id = iwqp->ibqp.qp_num; 3451 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info); 3452 if 
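	/*
	 * If the last QP has detached, take the group off the software list
	 * and destroy the HW multicast context; otherwise modify the group
	 * with the remaining members.
	 */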
(!mc_qht_elem->mc_grp_ctx.no_of_mgs) { 3453 mcast_list_del(mc_qht_elem); 3454 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 3455 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 3456 IRDMA_OP_MC_DESTROY); 3457 if (ret) { 3458 irdma_debug(&iwdev->rf->sc_dev, 3459 IRDMA_DEBUG_VERBS, "failed MC_DESTROY MCG\n"); 3460 spin_lock_irqsave(&rf->qh_list_lock, flags); 3461 mcast_list_add(rf, mc_qht_elem); 3462 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 3463 return -EAGAIN; 3464 } 3465 3466 irdma_free_dma_mem(&rf->hw, 3467 &mc_qht_elem->mc_grp_ctx.dma_mem_mc); 3468 irdma_free_rsrc(rf, rf->allocated_mcgs, 3469 mc_qht_elem->mc_grp_ctx.mg_id); 3470 kfree(mc_qht_elem); 3471 } else { 3472 spin_unlock_irqrestore(&rf->qh_list_lock, flags); 3473 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx, 3474 IRDMA_OP_MC_MODIFY); 3475 if (ret) { 3476 irdma_debug(&iwdev->rf->sc_dev, 3477 IRDMA_DEBUG_VERBS, "failed Modify MCG\n"); 3478 return ret; 3479 } 3480 } 3481 3482 return 0; 3483 } 3484 3485 /** 3486 * irdma_query_ah - Query address handle 3487 * @ibah: pointer to address handle 3488 * @ah_attr: address handle attributes 3489 */ 3490 static int 3491 irdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) 3492 { 3493 struct irdma_ah *ah = to_iwah(ibah); 3494 3495 memset(ah_attr, 0, sizeof(*ah_attr)); 3496 if (ah->av.attrs.ah_flags & IB_AH_GRH) { 3497 ah_attr->ah_flags = IB_AH_GRH; 3498 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label; 3499 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos; 3500 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl; 3501 ah_attr->grh.sgid_index = ah->sgid_index; 3502 ah_attr->grh.sgid_index = ah->sgid_index; 3503 memcpy(&ah_attr->grh.dgid, &ah->dgid, 3504 sizeof(ah_attr->grh.dgid)); 3505 } 3506 3507 return 0; 3508 } 3509 3510 static struct ifnet * 3511 irdma_get_netdev(struct ib_device *ibdev, u8 port_num) 3512 { 3513 struct irdma_device *iwdev = to_iwdev(ibdev); 3514 3515 if (iwdev->netdev) { 3516 dev_hold(iwdev->netdev); 3517 return iwdev->netdev; 3518 } 3519 3520 return NULL; 3521 } 3522 3523 static void 3524 irdma_set_device_ops(struct ib_device *ibdev) 3525 { 3526 struct ib_device *dev_ops = ibdev; 3527 3528 #if __FreeBSD_version >= 1400000 3529 dev_ops->ops.driver_id = RDMA_DRIVER_I40IW; 3530 dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah); 3531 dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq); 3532 dev_ops->ops.size_ib_pd = IRDMA_SET_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd); 3533 dev_ops->ops.size_ib_ucontext = IRDMA_SET_RDMA_OBJ_SIZE(ib_ucontext, 3534 irdma_ucontext, 3535 ibucontext); 3536 3537 #endif /* __FreeBSD_version >= 1400000 */ 3538 dev_ops->alloc_hw_stats = irdma_alloc_hw_stats; 3539 dev_ops->alloc_mr = irdma_alloc_mr; 3540 dev_ops->alloc_mw = irdma_alloc_mw; 3541 dev_ops->alloc_pd = irdma_alloc_pd; 3542 dev_ops->alloc_ucontext = irdma_alloc_ucontext; 3543 dev_ops->create_cq = irdma_create_cq; 3544 dev_ops->create_qp = irdma_create_qp; 3545 dev_ops->dealloc_mw = irdma_dealloc_mw; 3546 dev_ops->dealloc_pd = irdma_dealloc_pd; 3547 dev_ops->dealloc_ucontext = irdma_dealloc_ucontext; 3548 dev_ops->dereg_mr = irdma_dereg_mr; 3549 dev_ops->destroy_cq = irdma_destroy_cq; 3550 dev_ops->destroy_qp = irdma_destroy_qp; 3551 dev_ops->disassociate_ucontext = irdma_disassociate_ucontext; 3552 dev_ops->get_dev_fw_str = irdma_get_dev_fw_str; 3553 dev_ops->get_dma_mr = irdma_get_dma_mr; 3554 dev_ops->get_hw_stats = irdma_get_hw_stats; 3555 dev_ops->get_netdev = irdma_get_netdev; 3556 
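	/*
	 * mmap_free is only wired up where the rdma_user_mmap_entry API is
	 * available (see the version check below); older stacks track the
	 * mmap keys in the ucontext hash table instead.
	 */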
dev_ops->map_mr_sg = irdma_map_mr_sg; 3557 dev_ops->mmap = irdma_mmap; 3558 #if __FreeBSD_version >= 1400026 3559 dev_ops->mmap_free = irdma_mmap_free; 3560 #endif 3561 dev_ops->poll_cq = irdma_poll_cq; 3562 dev_ops->post_recv = irdma_post_recv; 3563 dev_ops->post_send = irdma_post_send; 3564 dev_ops->query_device = irdma_query_device; 3565 dev_ops->query_port = irdma_query_port; 3566 dev_ops->modify_port = irdma_modify_port; 3567 dev_ops->query_qp = irdma_query_qp; 3568 dev_ops->reg_user_mr = irdma_reg_user_mr; 3569 dev_ops->rereg_user_mr = irdma_rereg_user_mr; 3570 dev_ops->req_notify_cq = irdma_req_notify_cq; 3571 dev_ops->resize_cq = irdma_resize_cq; 3572 } 3573 3574 static void 3575 irdma_set_device_mcast_ops(struct ib_device *ibdev) 3576 { 3577 struct ib_device *dev_ops = ibdev; 3578 dev_ops->attach_mcast = irdma_attach_mcast; 3579 dev_ops->detach_mcast = irdma_detach_mcast; 3580 } 3581 3582 static void 3583 irdma_set_device_roce_ops(struct ib_device *ibdev) 3584 { 3585 struct ib_device *dev_ops = ibdev; 3586 dev_ops->create_ah = irdma_create_ah; 3587 dev_ops->destroy_ah = irdma_destroy_ah; 3588 dev_ops->get_link_layer = irdma_get_link_layer; 3589 dev_ops->get_port_immutable = irdma_roce_port_immutable; 3590 dev_ops->modify_qp = irdma_modify_qp_roce; 3591 dev_ops->query_ah = irdma_query_ah; 3592 dev_ops->query_gid = irdma_query_gid_roce; 3593 dev_ops->query_pkey = irdma_query_pkey; 3594 ibdev->add_gid = irdma_add_gid; 3595 ibdev->del_gid = irdma_del_gid; 3596 } 3597 3598 static void 3599 irdma_set_device_iw_ops(struct ib_device *ibdev) 3600 { 3601 struct ib_device *dev_ops = ibdev; 3602 3603 ibdev->uverbs_cmd_mask |= 3604 (1ull << IB_USER_VERBS_CMD_CREATE_AH) | 3605 (1ull << IB_USER_VERBS_CMD_DESTROY_AH); 3606 3607 dev_ops->create_ah = irdma_create_ah_stub; 3608 dev_ops->destroy_ah = irdma_destroy_ah_stub; 3609 dev_ops->get_port_immutable = irdma_iw_port_immutable; 3610 dev_ops->modify_qp = irdma_modify_qp; 3611 dev_ops->query_gid = irdma_query_gid; 3612 dev_ops->query_pkey = irdma_iw_query_pkey; 3613 } 3614 3615 static inline void 3616 irdma_set_device_gen1_ops(struct ib_device *ibdev) 3617 { 3618 } 3619 3620 /** 3621 * irdma_init_roce_device - initialization of roce rdma device 3622 * @iwdev: irdma device 3623 */ 3624 static void 3625 irdma_init_roce_device(struct irdma_device *iwdev) 3626 { 3627 kc_set_roce_uverbs_cmd_mask(iwdev); 3628 iwdev->ibdev.node_type = RDMA_NODE_IB_CA; 3629 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, 3630 IF_LLADDR(iwdev->netdev)); 3631 irdma_set_device_roce_ops(&iwdev->ibdev); 3632 if (iwdev->rf->rdma_ver == IRDMA_GEN_2) 3633 irdma_set_device_mcast_ops(&iwdev->ibdev); 3634 } 3635 3636 /** 3637 * irdma_init_iw_device - initialization of iwarp rdma device 3638 * @iwdev: irdma device 3639 */ 3640 static int 3641 irdma_init_iw_device(struct irdma_device *iwdev) 3642 { 3643 struct ifnet *netdev = iwdev->netdev; 3644 3645 iwdev->ibdev.node_type = RDMA_NODE_RNIC; 3646 addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid, 3647 IF_LLADDR(netdev)); 3648 iwdev->ibdev.iwcm = kzalloc(sizeof(*iwdev->ibdev.iwcm), GFP_KERNEL); 3649 if (!iwdev->ibdev.iwcm) 3650 return -ENOMEM; 3651 3652 iwdev->ibdev.iwcm->add_ref = irdma_qp_add_ref; 3653 iwdev->ibdev.iwcm->rem_ref = irdma_qp_rem_ref; 3654 iwdev->ibdev.iwcm->get_qp = irdma_get_qp; 3655 iwdev->ibdev.iwcm->connect = irdma_connect; 3656 iwdev->ibdev.iwcm->accept = irdma_accept; 3657 iwdev->ibdev.iwcm->reject = irdma_reject; 3658 iwdev->ibdev.iwcm->create_listen = irdma_create_listen; 3659 iwdev->ibdev.iwcm->destroy_listen 
= irdma_destroy_listen; 3660 memcpy(iwdev->ibdev.iwcm->ifname, if_name(netdev), 3661 sizeof(iwdev->ibdev.iwcm->ifname)); 3662 irdma_set_device_iw_ops(&iwdev->ibdev); 3663 3664 return 0; 3665 } 3666 3667 /** 3668 * irdma_init_rdma_device - initialization of rdma device 3669 * @iwdev: irdma device 3670 */ 3671 static int 3672 irdma_init_rdma_device(struct irdma_device *iwdev) 3673 { 3674 int ret; 3675 3676 iwdev->ibdev.owner = THIS_MODULE; 3677 iwdev->ibdev.uverbs_abi_ver = IRDMA_ABI_VER; 3678 kc_set_rdma_uverbs_cmd_mask(iwdev); 3679 3680 if (iwdev->roce_mode) { 3681 irdma_init_roce_device(iwdev); 3682 } else { 3683 ret = irdma_init_iw_device(iwdev); 3684 if (ret) 3685 return ret; 3686 } 3687 3688 iwdev->ibdev.phys_port_cnt = 1; 3689 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count; 3690 iwdev->ibdev.dev.parent = iwdev->rf->dev_ctx.dev; 3691 set_ibdev_dma_device(iwdev->ibdev, &iwdev->rf->pcidev->dev); 3692 irdma_set_device_ops(&iwdev->ibdev); 3693 if (iwdev->rf->rdma_ver == IRDMA_GEN_1) 3694 irdma_set_device_gen1_ops(&iwdev->ibdev); 3695 3696 return 0; 3697 } 3698 3699 /** 3700 * irdma_port_ibevent - indicate port event 3701 * @iwdev: irdma device 3702 */ 3703 void 3704 irdma_port_ibevent(struct irdma_device *iwdev) 3705 { 3706 struct ib_event event; 3707 3708 event.device = &iwdev->ibdev; 3709 event.element.port_num = 1; 3710 event.event = 3711 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 3712 ib_dispatch_event(&event); 3713 } 3714 3715 /** 3716 * irdma_ib_unregister_device - unregister rdma device from IB 3717 * core 3718 * @iwdev: irdma device 3719 */ 3720 void 3721 irdma_ib_unregister_device(struct irdma_device *iwdev) 3722 { 3723 iwdev->iw_status = 0; 3724 irdma_port_ibevent(iwdev); 3725 ib_unregister_device(&iwdev->ibdev); 3726 dev_put(iwdev->netdev); 3727 kfree(iwdev->ibdev.iwcm); 3728 iwdev->ibdev.iwcm = NULL; 3729 } 3730 3731 /** 3732 * irdma_ib_register_device - register irdma device to IB core 3733 * @iwdev: irdma device 3734 */ 3735 int 3736 irdma_ib_register_device(struct irdma_device *iwdev) 3737 { 3738 int ret; 3739 3740 ret = irdma_init_rdma_device(iwdev); 3741 if (ret) 3742 return ret; 3743 3744 dev_hold(iwdev->netdev); 3745 sprintf(iwdev->ibdev.name, "irdma-%s", if_name(iwdev->netdev)); 3746 ret = ib_register_device(&iwdev->ibdev, NULL); 3747 if (ret) 3748 goto error; 3749 3750 iwdev->iw_status = 1; 3751 irdma_port_ibevent(iwdev); 3752 3753 return 0; 3754 3755 error: 3756 kfree(iwdev->ibdev.iwcm); 3757 iwdev->ibdev.iwcm = NULL; 3758 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "Register RDMA device fail\n"); 3759 3760 return ret; 3761 } 3762
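
/*
 * Illustrative sketch (not part of the driver): how a user-space verbs
 * application reaches the handlers registered above.  ibv_reg_mr() ends up
 * in irdma_reg_user_mr(), ibv_post_send() in irdma_post_send() and
 * ibv_poll_cq() in irdma_poll_cq().  PD, QP and CQ setup are assumed to
 * have been done elsewhere and error paths are trimmed.
 *
 *	#include <stdint.h>
 *	#include <infiniband/verbs.h>
 *
 *	static int send_one(struct ibv_pd *pd, struct ibv_qp *qp,
 *			    struct ibv_cq *cq, void *buf, uint32_t len)
 *	{
 *		struct ibv_mr *mr;
 *		struct ibv_sge sge;
 *		struct ibv_send_wr wr = {0}, *bad_wr;
 *		struct ibv_wc wc;
 *
 *		mr = ibv_reg_mr(pd, buf, len, IBV_ACCESS_LOCAL_WRITE);
 *		if (!mr)
 *			return -1;
 *
 *		sge.addr = (uintptr_t)buf;
 *		sge.length = len;
 *		sge.lkey = mr->lkey;
 *
 *		wr.wr_id = 1;
 *		wr.sg_list = &sge;
 *		wr.num_sge = 1;
 *		wr.opcode = IBV_WR_SEND;
 *		wr.send_flags = IBV_SEND_SIGNALED;
 *
 *		if (ibv_post_send(qp, &wr, &bad_wr)) {
 *			ibv_dereg_mr(mr);
 *			return -1;
 *		}
 *
 *		while (ibv_poll_cq(cq, 1, &wc) == 0)
 *			;
 *
 *		ibv_dereg_mr(mr);
 *		return wc.status == IBV_WC_SUCCESS ? 0 : -1;
 *	}
 */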