1 /*- 2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB 3 * 4 * Copyright (c) 2018 - 2022 Intel Corporation 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenFabrics.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35 #include "irdma_main.h" 36 37 #define IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) 38 39 static u16 kc_rdma_flow_label_to_udp_sport(u32 fl) { 40 u32 fl_low = fl & 0x03FFF; 41 u32 fl_high = fl & 0xFC000; 42 43 fl_low ^= fl_high >> 14; 44 45 return (u16)(fl_low | IRDMA_ROCE_UDP_ENCAP_VALID_PORT_MIN); 46 } 47 48 #define IRDMA_GRH_FLOWLABEL_MASK (0x000FFFFF) 49 50 static u32 kc_rdma_calc_flow_label(u32 lqpn, u32 rqpn) { 51 u64 fl = (u64)lqpn * rqpn; 52 53 fl ^= fl >> 20; 54 fl ^= fl >> 40; 55 56 return (u32)(fl & IRDMA_GRH_FLOWLABEL_MASK); 57 } 58 59 u16 60 kc_rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn) 61 { 62 if (!fl) 63 fl = kc_rdma_calc_flow_label(lqpn, rqpn); 64 return kc_rdma_flow_label_to_udp_sport(fl); 65 } 66 67 void 68 irdma_get_dev_fw_str(struct ib_device *dev, 69 char *str, 70 size_t str_len) 71 { 72 struct irdma_device *iwdev = to_iwdev(dev); 73 74 snprintf(str, str_len, "%u.%u", 75 irdma_fw_major_ver(&iwdev->rf->sc_dev), 76 irdma_fw_minor_ver(&iwdev->rf->sc_dev)); 77 } 78 79 int 80 irdma_add_gid(struct ib_device *device, 81 u8 port_num, 82 unsigned int index, 83 const union ib_gid *gid, 84 const struct ib_gid_attr *attr, 85 void **context) 86 { 87 return 0; 88 } 89 90 int 91 irdma_del_gid(struct ib_device *device, 92 u8 port_num, 93 unsigned int index, 94 void **context) 95 { 96 return 0; 97 } 98 99 #if __FreeBSD_version >= 1400026 100 /** 101 * irdma_alloc_mr - register stag for fast memory registration 102 * @pd: ibpd pointer 103 * @mr_type: memory for stag registrion 104 * @max_num_sg: man number of pages 105 * @udata: user data 106 */ 107 struct ib_mr * 108 irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, 109 u32 max_num_sg, struct ib_udata *udata) 110 { 111 #else 112 /** 113 * irdma_alloc_mr - register stag for fast memory registration 114 * @pd: ibpd pointer 115 * @mr_type: memory for stag registrion 116 * @max_num_sg: man number of pages 117 */ 118 struct ib_mr * 119 irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, 120 u32 max_num_sg) 121 { 122 #endif 123 struct irdma_device *iwdev = 
to_iwdev(pd->device); 124 struct irdma_pble_alloc *palloc; 125 struct irdma_pbl *iwpbl; 126 struct irdma_mr *iwmr; 127 int status; 128 u32 stag; 129 int err_code = -ENOMEM; 130 131 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 132 if (!iwmr) 133 return ERR_PTR(-ENOMEM); 134 135 stag = irdma_create_stag(iwdev); 136 if (!stag) { 137 err_code = -ENOMEM; 138 goto err; 139 } 140 141 iwmr->stag = stag; 142 iwmr->ibmr.rkey = stag; 143 iwmr->ibmr.lkey = stag; 144 iwmr->ibmr.pd = pd; 145 iwmr->ibmr.device = pd->device; 146 iwpbl = &iwmr->iwpbl; 147 iwpbl->iwmr = iwmr; 148 iwmr->type = IRDMA_MEMREG_TYPE_MEM; 149 palloc = &iwpbl->pble_alloc; 150 iwmr->page_cnt = max_num_sg; 151 /* Assume system PAGE_SIZE as the sg page sizes are unknown. */ 152 iwmr->len = max_num_sg * PAGE_SIZE; 153 status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt, 154 false); 155 if (status) 156 goto err_get_pble; 157 158 err_code = irdma_hw_alloc_stag(iwdev, iwmr); 159 if (err_code) 160 goto err_alloc_stag; 161 162 iwpbl->pbl_allocated = true; 163 164 return &iwmr->ibmr; 165 err_alloc_stag: 166 irdma_free_pble(iwdev->rf->pble_rsrc, palloc); 167 err_get_pble: 168 irdma_free_stag(iwdev, stag); 169 err: 170 kfree(iwmr); 171 172 return ERR_PTR(err_code); 173 } 174 175 #define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8) 176 #define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd) 177 #if __FreeBSD_version >= 1400026 178 /** 179 * irdma_alloc_ucontext - Allocate the user context data structure 180 * @uctx: context 181 * @udata: user data 182 * 183 * This keeps track of all objects associated with a particular 184 * user-mode client. 185 */ 186 int 187 irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) 188 { 189 struct ib_device *ibdev = uctx->device; 190 struct irdma_device *iwdev = to_iwdev(ibdev); 191 struct irdma_alloc_ucontext_req req = {0}; 192 struct irdma_alloc_ucontext_resp uresp = {0}; 193 struct irdma_ucontext *ucontext = to_ucontext(uctx); 194 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; 195 196 if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || 197 udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) 198 return -EINVAL; 199 200 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) 201 return -EINVAL; 202 203 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER) 204 goto ver_error; 205 206 ucontext->iwdev = iwdev; 207 ucontext->abi_ver = req.userspace_ver; 208 209 if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR) 210 ucontext->use_raw_attrs = true; 211 212 /* GEN_1 support for libi40iw */ 213 if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { 214 if (uk_attrs->hw_rev != IRDMA_GEN_1) 215 return -EOPNOTSUPP; 216 217 ucontext->legacy_mode = true; 218 uresp.max_qps = iwdev->rf->max_qp; 219 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds; 220 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2; 221 uresp.kernel_ver = req.userspace_ver; 222 if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) 223 return -EFAULT; 224 } else { 225 u64 bar_off; 226 227 uresp.kernel_ver = IRDMA_ABI_VER; 228 uresp.feature_flags = uk_attrs->feature_flags; 229 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; 230 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; 231 uresp.max_hw_inline = uk_attrs->max_hw_inline; 232 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; 233 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; 234 uresp.max_hw_sq_chunk = 
uk_attrs->max_hw_sq_chunk; 235 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; 236 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; 237 uresp.hw_rev = uk_attrs->hw_rev; 238 uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR; 239 240 bar_off = 241 (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; 242 ucontext->db_mmap_entry = 243 irdma_user_mmap_entry_insert(ucontext, bar_off, 244 IRDMA_MMAP_IO_NC, 245 &uresp.db_mmap_key); 246 if (!ucontext->db_mmap_entry) { 247 return -ENOMEM; 248 } 249 250 if (ib_copy_to_udata(udata, &uresp, 251 min(sizeof(uresp), udata->outlen))) { 252 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); 253 return -EFAULT; 254 } 255 } 256 257 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); 258 spin_lock_init(&ucontext->cq_reg_mem_list_lock); 259 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); 260 spin_lock_init(&ucontext->qp_reg_mem_list_lock); 261 INIT_LIST_HEAD(&ucontext->vma_list); 262 mutex_init(&ucontext->vma_list_mutex); 263 264 return 0; 265 266 ver_error: 267 irdma_dev_err(&iwdev->ibdev, 268 "Invalid userspace driver version detected. Detected version %d, should be %d\n", 269 req.userspace_ver, IRDMA_ABI_VER); 270 return -EINVAL; 271 } 272 #endif 273 274 #if __FreeBSD_version < 1400026 275 /** 276 * irdma_alloc_ucontext - Allocate the user context data structure 277 * @ibdev: ib device pointer 278 * @udata: user data 279 * 280 * This keeps track of all objects associated with a particular 281 * user-mode client. 282 */ 283 struct ib_ucontext * 284 irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) 285 { 286 struct irdma_device *iwdev = to_iwdev(ibdev); 287 struct irdma_alloc_ucontext_req req = {0}; 288 struct irdma_alloc_ucontext_resp uresp = {0}; 289 struct irdma_ucontext *ucontext; 290 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; 291 292 if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN || 293 udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN) 294 return ERR_PTR(-EINVAL); 295 296 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) 297 return ERR_PTR(-EINVAL); 298 299 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER) 300 goto ver_error; 301 302 ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL); 303 if (!ucontext) 304 return ERR_PTR(-ENOMEM); 305 306 ucontext->iwdev = iwdev; 307 ucontext->abi_ver = req.userspace_ver; 308 309 if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR) 310 ucontext->use_raw_attrs = true; 311 312 /* GEN_1 legacy support with libi40iw */ 313 if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) { 314 if (uk_attrs->hw_rev != IRDMA_GEN_1) { 315 kfree(ucontext); 316 return ERR_PTR(-EOPNOTSUPP); 317 } 318 319 ucontext->legacy_mode = true; 320 uresp.max_qps = iwdev->rf->max_qp; 321 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds; 322 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2; 323 uresp.kernel_ver = req.userspace_ver; 324 if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) { 325 kfree(ucontext); 326 return ERR_PTR(-EFAULT); 327 } 328 } else { 329 u64 bar_off; 330 331 uresp.kernel_ver = IRDMA_ABI_VER; 332 uresp.feature_flags = uk_attrs->feature_flags; 333 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags; 334 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges; 335 uresp.max_hw_inline = uk_attrs->max_hw_inline; 336 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta; 337 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta; 338 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk; 339 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size; 340 
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size; 341 uresp.hw_rev = uk_attrs->hw_rev; 342 uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR; 343 344 bar_off = 345 (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET]; 346 347 spin_lock_init(&ucontext->mmap_tbl_lock); 348 ucontext->db_mmap_entry = 349 irdma_user_mmap_entry_add_hash(ucontext, bar_off, 350 IRDMA_MMAP_IO_NC, 351 &uresp.db_mmap_key); 352 if (!ucontext->db_mmap_entry) { 353 spin_lock_destroy(&ucontext->mmap_tbl_lock); 354 kfree(ucontext); 355 return ERR_PTR(-ENOMEM); 356 } 357 358 if (ib_copy_to_udata(udata, &uresp, 359 min(sizeof(uresp), udata->outlen))) { 360 irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry); 361 spin_lock_destroy(&ucontext->mmap_tbl_lock); 362 kfree(ucontext); 363 return ERR_PTR(-EFAULT); 364 } 365 } 366 367 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); 368 spin_lock_init(&ucontext->cq_reg_mem_list_lock); 369 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); 370 spin_lock_init(&ucontext->qp_reg_mem_list_lock); 371 INIT_LIST_HEAD(&ucontext->vma_list); 372 mutex_init(&ucontext->vma_list_mutex); 373 374 return &ucontext->ibucontext; 375 376 ver_error: 377 irdma_dev_err(&iwdev->ibdev, 378 "Invalid userspace driver version detected. Detected version %d, should be %d\n", 379 req.userspace_ver, IRDMA_ABI_VER); 380 return ERR_PTR(-EINVAL); 381 } 382 #endif 383 384 #if __FreeBSD_version >= 1400026 385 /** 386 * irdma_dealloc_ucontext - deallocate the user context data structure 387 * @context: user context created during alloc 388 */ 389 void 390 irdma_dealloc_ucontext(struct ib_ucontext *context) 391 { 392 struct irdma_ucontext *ucontext = to_ucontext(context); 393 394 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry); 395 396 return; 397 } 398 #endif 399 400 #if __FreeBSD_version < 1400026 401 /** 402 * irdma_dealloc_ucontext - deallocate the user context data structure 403 * @context: user context created during alloc 404 */ 405 int 406 irdma_dealloc_ucontext(struct ib_ucontext *context) 407 { 408 struct irdma_ucontext *ucontext = to_ucontext(context); 409 410 irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry); 411 spin_lock_destroy(&ucontext->mmap_tbl_lock); 412 kfree(ucontext); 413 414 return 0; 415 } 416 #endif 417 418 #define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd) 419 #if __FreeBSD_version >= 1400026 420 /** 421 * irdma_alloc_pd - allocate protection domain 422 * @pd: protection domain 423 * @udata: user data 424 */ 425 int 426 irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) 427 { 428 struct irdma_pd *iwpd = to_iwpd(pd); 429 struct irdma_device *iwdev = to_iwdev(pd->device); 430 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 431 struct irdma_pci_f *rf = iwdev->rf; 432 struct irdma_alloc_pd_resp uresp = {0}; 433 struct irdma_sc_pd *sc_pd; 434 u32 pd_id = 0; 435 int err; 436 437 if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN) 438 return -EINVAL; 439 440 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, 441 &rf->next_pd); 442 if (err) 443 return err; 444 445 sc_pd = &iwpd->sc_pd; 446 if (udata) { 447 struct irdma_ucontext *ucontext = 448 rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); 449 450 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); 451 uresp.pd_id = pd_id; 452 if (ib_copy_to_udata(udata, &uresp, 453 min(sizeof(uresp), udata->outlen))) { 454 err = -EFAULT; 455 goto error; 456 } 457 } else { 458 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER); 459 } 460 461 spin_lock_init(&iwpd->udqp_list_lock); 
462 INIT_LIST_HEAD(&iwpd->udqp_list); 463 464 return 0; 465 466 error: 467 468 irdma_free_rsrc(rf, rf->allocated_pds, pd_id); 469 470 return err; 471 } 472 #endif 473 474 #if __FreeBSD_version < 1400026 475 /** 476 * irdma_alloc_pd - allocate protection domain 477 * @ibdev: IB device 478 * @context: user context 479 * @udata: user data 480 */ 481 struct ib_pd * 482 irdma_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) 483 { 484 struct irdma_pd *iwpd; 485 struct irdma_device *iwdev = to_iwdev(ibdev); 486 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev; 487 struct irdma_pci_f *rf = iwdev->rf; 488 struct irdma_alloc_pd_resp uresp = {0}; 489 struct irdma_sc_pd *sc_pd; 490 u32 pd_id = 0; 491 int err; 492 493 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id, 494 &rf->next_pd); 495 if (err) 496 return ERR_PTR(err); 497 498 iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL); 499 if (!iwpd) { 500 err = -ENOMEM; 501 goto free_res; 502 } 503 504 sc_pd = &iwpd->sc_pd; 505 if (udata) { 506 struct irdma_ucontext *ucontext = to_ucontext(context); 507 508 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); 509 uresp.pd_id = pd_id; 510 if (ib_copy_to_udata(udata, &uresp, 511 min(sizeof(uresp), udata->outlen))) { 512 err = -EFAULT; 513 goto error; 514 } 515 } else { 516 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER); 517 } 518 519 spin_lock_init(&iwpd->udqp_list_lock); 520 INIT_LIST_HEAD(&iwpd->udqp_list); 521 522 return &iwpd->ibpd; 523 524 error: 525 kfree(iwpd); 526 free_res: 527 528 irdma_free_rsrc(rf, rf->allocated_pds, pd_id); 529 530 return ERR_PTR(err); 531 } 532 533 #endif 534 535 #if __FreeBSD_version >= 1400026 536 void 537 irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 538 { 539 struct irdma_pd *iwpd = to_iwpd(ibpd); 540 struct irdma_device *iwdev = to_iwdev(ibpd->device); 541 542 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); 543 } 544 545 #endif 546 547 #if __FreeBSD_version < 1400026 548 int 549 irdma_dealloc_pd(struct ib_pd *ibpd) 550 { 551 struct irdma_pd *iwpd = to_iwpd(ibpd); 552 struct irdma_device *iwdev = to_iwdev(ibpd->device); 553 554 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id); 555 kfree(iwpd); 556 return 0; 557 } 558 #endif 559 560 /** 561 * irdma_find_qp_update_qs - update QS handle for UD QPs 562 * @rf: RDMA PCI function 563 * @pd: protection domain object 564 * @user_pri: selected user priority 565 */ 566 static void 567 irdma_find_qp_update_qs(struct irdma_pci_f *rf, 568 struct irdma_pd *pd, u8 user_pri) 569 { 570 struct irdma_qp *iwqp; 571 struct list_head *tmp_node, *list_node; 572 struct irdma_udqs_work *work; 573 unsigned long flags; 574 bool qs_change; 575 576 spin_lock_irqsave(&pd->udqp_list_lock, flags); 577 list_for_each_safe(list_node, tmp_node, &pd->udqp_list) { 578 qs_change = true; 579 iwqp = list_entry(list_node, struct irdma_qp, ud_list_elem); 580 irdma_qp_add_ref(&iwqp->ibqp); 581 /* check if qs_handle needs to be changed */ 582 if (iwqp->sc_qp.qs_handle == iwqp->sc_qp.vsi->qos[user_pri].qs_handle) { 583 if (iwqp->ctx_info.user_pri == user_pri) { 584 /* qs_handle and user_pri don't change */ 585 irdma_qp_rem_ref(&iwqp->ibqp); 586 continue; 587 } 588 qs_change = false; 589 } 590 /* perform qp qos change */ 591 work = kzalloc(sizeof(*work), GFP_ATOMIC); 592 if (!work) { 593 irdma_qp_rem_ref(&iwqp->ibqp); 594 spin_unlock_irqrestore(&pd->udqp_list_lock, flags); 595 return; 596 } 597 work->iwqp = iwqp; 598 work->user_prio = user_pri; 599 
work->qs_change = qs_change; 600 INIT_WORK(&work->work, irdma_udqp_qs_worker); 601 if (qs_change) 602 irdma_cqp_qp_suspend_resume(&iwqp->sc_qp, IRDMA_OP_SUSPEND); 603 queue_work(rf->iwdev->cleanup_wq, &work->work); 604 } 605 spin_unlock_irqrestore(&pd->udqp_list_lock, flags); 606 } 607 608 static void 609 irdma_fill_ah_info(struct vnet *vnet, struct irdma_ah_info *ah_info, 610 const struct ib_gid_attr *sgid_attr, 611 struct sockaddr *sgid_addr, struct sockaddr *dgid_addr, 612 u8 *dmac, u8 net_type) 613 { 614 if (net_type == RDMA_NETWORK_IPV4) { 615 ah_info->ipv4_valid = true; 616 ah_info->dest_ip_addr[0] = 617 ntohl(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr); 618 ah_info->src_ip_addr[0] = 619 ntohl(((struct sockaddr_in *)sgid_addr)->sin_addr.s_addr); 620 CURVNET_SET_QUIET(vnet); 621 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0], 622 ah_info->dest_ip_addr[0]); 623 CURVNET_RESTORE(); 624 if (ipv4_is_multicast(((struct sockaddr_in *)dgid_addr)->sin_addr.s_addr)) { 625 irdma_mcast_mac_v4(ah_info->dest_ip_addr, dmac); 626 } 627 } else { 628 irdma_copy_ip_ntohl(ah_info->dest_ip_addr, 629 ((struct sockaddr_in6 *)dgid_addr)->sin6_addr.__u6_addr.__u6_addr32); 630 irdma_copy_ip_ntohl(ah_info->src_ip_addr, 631 ((struct sockaddr_in6 *)sgid_addr)->sin6_addr.__u6_addr.__u6_addr32); 632 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr, 633 ah_info->dest_ip_addr); 634 if (rdma_is_multicast_addr(&((struct sockaddr_in6 *)dgid_addr)->sin6_addr)) { 635 irdma_mcast_mac_v6(ah_info->dest_ip_addr, dmac); 636 } 637 } 638 } 639 640 static inline u8 irdma_get_vlan_ndev_prio(if_t ndev, u8 prio) 641 { 642 return prio; 643 } 644 645 static int 646 irdma_create_ah_vlan_tag(struct irdma_device *iwdev, 647 struct irdma_pd *pd, 648 struct irdma_ah_info *ah_info, 649 const struct ib_gid_attr *sgid_attr, 650 u8 *dmac) 651 { 652 u16 vlan_prio; 653 654 if (sgid_attr->ndev && is_vlan_dev(sgid_attr->ndev)) 655 ah_info->vlan_tag = vlan_dev_vlan_id(sgid_attr->ndev); 656 else 657 ah_info->vlan_tag = VLAN_N_VID; 658 659 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr, dmac); 660 661 if (ah_info->dst_arpindex == -1) 662 return -EINVAL; 663 664 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode) 665 ah_info->vlan_tag = 0; 666 667 if (ah_info->vlan_tag < VLAN_N_VID) { 668 if_t ndev = sgid_attr->ndev; 669 670 ah_info->insert_vlan_tag = true; 671 vlan_prio = (u16)irdma_get_vlan_ndev_prio(ndev, rt_tos2priority(ah_info->tc_tos)); 672 ah_info->vlan_tag |= vlan_prio << VLAN_PRIO_SHIFT; 673 irdma_find_qp_update_qs(iwdev->rf, pd, vlan_prio); 674 } 675 if (iwdev->roce_dcqcn_en) { 676 ah_info->tc_tos &= ~ECN_CODE_PT_MASK; 677 ah_info->tc_tos |= ECN_CODE_PT_VAL; 678 } 679 680 return 0; 681 } 682 683 static int 684 irdma_create_ah_wait(struct irdma_pci_f *rf, 685 struct irdma_sc_ah *sc_ah, bool sleep) 686 { 687 if (!sleep) { 688 int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms * 689 CQP_TIMEOUT_THRESHOLD; 690 691 do { 692 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); 693 mdelay(1); 694 } while (!sc_ah->ah_info.ah_valid && --cnt); 695 696 if (!cnt) 697 return -ETIMEDOUT; 698 } 699 return 0; 700 } 701 702 #define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd) 703 704 #if __FreeBSD_version >= 1400026 705 /** 706 * irdma_create_ah - create address handle 707 * @ib_ah: ptr to AH 708 * @attr: address handle attributes 709 * @flags: AH flags to wait 710 * @udata: user data 711 * 712 * returns 0 on success, error otherwise 713 */ 714 int 715 
irdma_create_ah(struct ib_ah *ib_ah, 716 struct ib_ah_attr *attr, u32 flags, 717 struct ib_udata *udata) 718 { 719 struct irdma_pd *pd = to_iwpd(ib_ah->pd); 720 struct irdma_ah *ah = container_of(ib_ah, struct irdma_ah, ibah); 721 struct irdma_device *iwdev = to_iwdev(ib_ah->pd->device); 722 union ib_gid sgid; 723 struct ib_gid_attr sgid_attr; 724 struct irdma_pci_f *rf = iwdev->rf; 725 struct irdma_sc_ah *sc_ah; 726 u32 ah_id = 0; 727 struct irdma_ah_info *ah_info; 728 struct irdma_create_ah_resp uresp; 729 union { 730 struct sockaddr saddr; 731 struct sockaddr_in saddr_in; 732 struct sockaddr_in6 saddr_in6; 733 } sgid_addr, dgid_addr; 734 int err; 735 u8 dmac[ETH_ALEN]; 736 bool sleep = (flags & RDMA_CREATE_AH_SLEEPABLE) != 0; 737 738 if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN) 739 return -EINVAL; 740 741 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, 742 rf->max_ah, &ah_id, &rf->next_ah); 743 744 if (err) 745 return err; 746 747 ah->pd = pd; 748 sc_ah = &ah->sc_ah; 749 sc_ah->ah_info.ah_idx = ah_id; 750 sc_ah->ah_info.vsi = &iwdev->vsi; 751 irdma_sc_init_ah(&rf->sc_dev, sc_ah); 752 ah->sgid_index = attr->grh.sgid_index; 753 memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); 754 rcu_read_lock(); 755 err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num, 756 attr->grh.sgid_index, &sgid, &sgid_attr); 757 rcu_read_unlock(); 758 if (err) { 759 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 760 "GID lookup at idx=%d with port=%d failed\n", 761 attr->grh.sgid_index, attr->port_num); 762 err = -EINVAL; 763 goto err_gid_l2; 764 } 765 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid); 766 rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); 767 ah->av.attrs = *attr; 768 ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr, 769 sgid_attr.gid_type, 770 &sgid); 771 772 if (sgid_attr.ndev) 773 dev_put(sgid_attr.ndev); 774 775 ah->av.sgid_addr.saddr = sgid_addr.saddr; 776 ah->av.dgid_addr.saddr = dgid_addr.saddr; 777 ah_info = &sc_ah->ah_info; 778 ah_info->ah_idx = ah_id; 779 ah_info->pd_idx = pd->sc_pd.pd_id; 780 ether_addr_copy(ah_info->mac_addr, if_getlladdr(iwdev->netdev)); 781 782 if (attr->ah_flags & IB_AH_GRH) { 783 ah_info->flow_label = attr->grh.flow_label; 784 ah_info->hop_ttl = attr->grh.hop_limit; 785 ah_info->tc_tos = attr->grh.traffic_class; 786 } 787 788 ether_addr_copy(dmac, attr->dmac); 789 790 irdma_fill_ah_info(if_getvnet(iwdev->netdev), ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr, 791 dmac, ah->av.net_type); 792 793 err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac); 794 if (err) 795 goto err_gid_l2; 796 797 err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE, 798 sleep, irdma_gsi_ud_qp_ah_cb, sc_ah); 799 if (err) { 800 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP-OP Create AH fail"); 801 goto err_gid_l2; 802 } 803 804 err = irdma_create_ah_wait(rf, sc_ah, sleep); 805 if (err) { 806 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out"); 807 goto err_gid_l2; 808 } 809 810 if (udata) { 811 uresp.ah_id = ah->sc_ah.ah_info.ah_idx; 812 err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 813 if (err) { 814 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, 815 IRDMA_OP_AH_DESTROY, false, NULL, ah); 816 goto err_gid_l2; 817 } 818 } 819 820 return 0; 821 err_gid_l2: 822 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); 823 824 return err; 825 } 826 #endif 827 828 void 829 irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr) 830 { 831 ether_addr_copy(dmac, attr->dmac); 832 } 833 834 #if 
__FreeBSD_version < 1400026 835 struct ib_ah * 836 irdma_create_ah_stub(struct ib_pd *ibpd, 837 struct ib_ah_attr *attr, 838 struct ib_udata *udata) 839 #else 840 int 841 irdma_create_ah_stub(struct ib_ah *ib_ah, 842 struct ib_ah_attr *attr, u32 flags, 843 struct ib_udata *udata) 844 #endif 845 { 846 #if __FreeBSD_version >= 1400026 847 return -ENOSYS; 848 #else 849 return ERR_PTR(-ENOSYS); 850 #endif 851 } 852 853 #if __FreeBSD_version >= 1400026 854 void 855 irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags) 856 { 857 return; 858 } 859 #else 860 int 861 irdma_destroy_ah_stub(struct ib_ah *ibah) 862 { 863 return -ENOSYS; 864 } 865 #endif 866 867 #if __FreeBSD_version < 1400026 868 /** 869 * irdma_create_ah - create address handle 870 * @ibpd: ptr to pd 871 * @attr: address handle attributes 872 * @udata: user data 873 * 874 * returns a pointer to an address handle 875 */ 876 struct ib_ah * 877 irdma_create_ah(struct ib_pd *ibpd, 878 struct ib_ah_attr *attr, 879 struct ib_udata *udata) 880 { 881 struct irdma_pd *pd = to_iwpd(ibpd); 882 struct irdma_device *iwdev = to_iwdev(ibpd->device); 883 struct irdma_ah *ah; 884 union ib_gid sgid; 885 struct ib_gid_attr sgid_attr; 886 struct irdma_pci_f *rf = iwdev->rf; 887 struct irdma_sc_ah *sc_ah; 888 u32 ah_id = 0; 889 struct irdma_ah_info *ah_info; 890 struct irdma_create_ah_resp uresp; 891 union { 892 struct sockaddr saddr; 893 struct sockaddr_in saddr_in; 894 struct sockaddr_in6 saddr_in6; 895 } sgid_addr, dgid_addr; 896 int err; 897 u8 dmac[ETH_ALEN]; 898 bool sleep = udata ? true : false; 899 900 if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN) 901 return ERR_PTR(-EINVAL); 902 903 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, 904 rf->max_ah, &ah_id, &rf->next_ah); 905 906 if (err) 907 return ERR_PTR(err); 908 909 ah = kzalloc(sizeof(*ah), GFP_ATOMIC); 910 if (!ah) { 911 irdma_free_rsrc(rf, rf->allocated_ahs, ah_id); 912 return ERR_PTR(-ENOMEM); 913 } 914 915 ah->pd = pd; 916 sc_ah = &ah->sc_ah; 917 sc_ah->ah_info.ah_idx = ah_id; 918 sc_ah->ah_info.vsi = &iwdev->vsi; 919 irdma_sc_init_ah(&rf->sc_dev, sc_ah); 920 ah->sgid_index = attr->grh.sgid_index; 921 memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid)); 922 rcu_read_lock(); 923 err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num, 924 attr->grh.sgid_index, &sgid, &sgid_attr); 925 rcu_read_unlock(); 926 if (err) { 927 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, 928 "GID lookup at idx=%d with port=%d failed\n", 929 attr->grh.sgid_index, attr->port_num); 930 err = -EINVAL; 931 goto err_gid_l2; 932 } 933 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid); 934 rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid); 935 ah->av.attrs = *attr; 936 ah->av.net_type = kc_rdma_gid_attr_network_type(sgid_attr, 937 sgid_attr.gid_type, 938 &sgid); 939 940 if (sgid_attr.ndev) 941 dev_put(sgid_attr.ndev); 942 943 ah->av.sgid_addr.saddr = sgid_addr.saddr; 944 ah->av.dgid_addr.saddr = dgid_addr.saddr; 945 ah_info = &sc_ah->ah_info; 946 ah_info->ah_idx = ah_id; 947 ah_info->pd_idx = pd->sc_pd.pd_id; 948 949 ether_addr_copy(ah_info->mac_addr, if_getlladdr(iwdev->netdev)); 950 if (attr->ah_flags & IB_AH_GRH) { 951 ah_info->flow_label = attr->grh.flow_label; 952 ah_info->hop_ttl = attr->grh.hop_limit; 953 ah_info->tc_tos = attr->grh.traffic_class; 954 } 955 956 if (udata) 957 ib_resolve_eth_dmac(ibpd->device, attr); 958 irdma_ether_copy(dmac, attr); 959 960 irdma_fill_ah_info(if_getvnet(iwdev->netdev), ah_info, &sgid_attr, &sgid_addr.saddr, &dgid_addr.saddr, 961 dmac, ah->av.net_type); 962 
963 err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac); 964 if (err) 965 goto err_gid_l2; 966 967 err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE, 968 sleep, irdma_gsi_ud_qp_ah_cb, sc_ah); 969 if (err) { 970 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "CQP-OP Create AH fail"); 971 goto err_gid_l2; 972 } 973 974 err = irdma_create_ah_wait(rf, sc_ah, sleep); 975 if (err) { 976 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out"); 977 goto err_gid_l2; 978 } 979 980 if (udata) { 981 uresp.ah_id = ah->sc_ah.ah_info.ah_idx; 982 err = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 983 if (err) { 984 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, 985 IRDMA_OP_AH_DESTROY, false, NULL, ah); 986 goto err_gid_l2; 987 } 988 } 989 990 return &ah->ibah; 991 err_gid_l2: 992 kfree(ah); 993 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id); 994 995 return ERR_PTR(err); 996 } 997 #endif 998 999 /** 1000 * irdma_free_qp_rsrc - free up memory resources for qp 1001 * @iwqp: qp ptr (user or kernel) 1002 */ 1003 void 1004 irdma_free_qp_rsrc(struct irdma_qp *iwqp) 1005 { 1006 struct irdma_device *iwdev = iwqp->iwdev; 1007 struct irdma_pci_f *rf = iwdev->rf; 1008 u32 qp_num = iwqp->ibqp.qp_num; 1009 1010 irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp); 1011 irdma_dealloc_push_page(rf, &iwqp->sc_qp); 1012 if (iwqp->sc_qp.vsi) { 1013 irdma_qp_rem_qos(&iwqp->sc_qp); 1014 iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi, 1015 iwqp->sc_qp.user_pri); 1016 } 1017 1018 if (qp_num > 2) 1019 irdma_free_rsrc(rf, rf->allocated_qps, qp_num); 1020 irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->q2_ctx_mem); 1021 irdma_free_dma_mem(rf->sc_dev.hw, &iwqp->kqp.dma_mem); 1022 kfree(iwqp->kqp.sig_trk_mem); 1023 iwqp->kqp.sig_trk_mem = NULL; 1024 kfree(iwqp->kqp.sq_wrid_mem); 1025 kfree(iwqp->kqp.rq_wrid_mem); 1026 kfree(iwqp->sg_list); 1027 kfree(iwqp); 1028 } 1029 1030 /** 1031 * irdma_create_qp - create qp 1032 * @ibpd: ptr of pd 1033 * @init_attr: attributes for qp 1034 * @udata: user data for create qp 1035 */ 1036 struct ib_qp * 1037 irdma_create_qp(struct ib_pd *ibpd, 1038 struct ib_qp_init_attr *init_attr, 1039 struct ib_udata *udata) 1040 { 1041 #define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx) 1042 #define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd) 1043 struct irdma_pd *iwpd = to_iwpd(ibpd); 1044 struct irdma_device *iwdev = to_iwdev(ibpd->device); 1045 struct irdma_pci_f *rf = iwdev->rf; 1046 struct irdma_qp *iwqp; 1047 struct irdma_create_qp_resp uresp = {0}; 1048 u32 qp_num = 0; 1049 int ret; 1050 int err_code; 1051 struct irdma_sc_qp *qp; 1052 struct irdma_sc_dev *dev = &rf->sc_dev; 1053 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs; 1054 struct irdma_qp_init_info init_info = {{0}}; 1055 struct irdma_qp_host_ctx_info *ctx_info; 1056 unsigned long flags; 1057 1058 err_code = irdma_validate_qp_attrs(init_attr, iwdev); 1059 if (err_code) 1060 return ERR_PTR(err_code); 1061 1062 if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN || 1063 udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN)) 1064 return ERR_PTR(-EINVAL); 1065 1066 init_info.vsi = &iwdev->vsi; 1067 init_info.qp_uk_init_info.uk_attrs = uk_attrs; 1068 init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr; 1069 init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr; 1070 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; 1071 init_info.qp_uk_init_info.max_rq_frag_cnt = 
init_attr->cap.max_recv_sge; 1072 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data; 1073 1074 iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL); 1075 if (!iwqp) 1076 return ERR_PTR(-ENOMEM); 1077 1078 iwqp->sg_list = kcalloc(uk_attrs->max_hw_wq_frags, sizeof(*iwqp->sg_list), 1079 GFP_KERNEL); 1080 if (!iwqp->sg_list) { 1081 kfree(iwqp); 1082 return ERR_PTR(-ENOMEM); 1083 } 1084 1085 qp = &iwqp->sc_qp; 1086 qp->qp_uk.back_qp = iwqp; 1087 qp->qp_uk.lock = &iwqp->lock; 1088 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX; 1089 1090 iwqp->iwdev = iwdev; 1091 iwqp->q2_ctx_mem.size = IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE; 1092 iwqp->q2_ctx_mem.va = irdma_allocate_dma_mem(dev->hw, &iwqp->q2_ctx_mem, 1093 iwqp->q2_ctx_mem.size, 1094 256); 1095 if (!iwqp->q2_ctx_mem.va) { 1096 kfree(iwqp->sg_list); 1097 kfree(iwqp); 1098 return ERR_PTR(-ENOMEM); 1099 } 1100 1101 init_info.q2 = iwqp->q2_ctx_mem.va; 1102 init_info.q2_pa = iwqp->q2_ctx_mem.pa; 1103 init_info.host_ctx = (__le64 *) (init_info.q2 + IRDMA_Q2_BUF_SIZE); 1104 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE; 1105 1106 if (init_attr->qp_type == IB_QPT_GSI) 1107 qp_num = 1; 1108 else 1109 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp, 1110 &qp_num, &rf->next_qp); 1111 if (err_code) 1112 goto error; 1113 1114 iwqp->iwpd = iwpd; 1115 iwqp->ibqp.qp_num = qp_num; 1116 qp = &iwqp->sc_qp; 1117 iwqp->iwscq = to_iwcq(init_attr->send_cq); 1118 iwqp->iwrcq = to_iwcq(init_attr->recv_cq); 1119 iwqp->host_ctx.va = init_info.host_ctx; 1120 iwqp->host_ctx.pa = init_info.host_ctx_pa; 1121 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE; 1122 1123 init_info.pd = &iwpd->sc_pd; 1124 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; 1125 if (!rdma_protocol_roce(&iwdev->ibdev, 1)) 1126 init_info.qp_uk_init_info.first_sq_wq = 1; 1127 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; 1128 init_waitqueue_head(&iwqp->waitq); 1129 init_waitqueue_head(&iwqp->mod_qp_waitq); 1130 1131 if (udata) { 1132 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver; 1133 err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info, init_attr); 1134 } else { 1135 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker); 1136 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER; 1137 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr); 1138 } 1139 1140 if (err_code) { 1141 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "setup qp failed\n"); 1142 goto error; 1143 } 1144 1145 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 1146 if (init_attr->qp_type == IB_QPT_RC) { 1147 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC; 1148 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | 1149 IRDMA_WRITE_WITH_IMM | 1150 IRDMA_ROCE; 1151 } else { 1152 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD; 1153 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM | 1154 IRDMA_ROCE; 1155 } 1156 } else { 1157 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP; 1158 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM; 1159 } 1160 1161 ret = irdma_sc_qp_init(qp, &init_info); 1162 if (ret) { 1163 err_code = -EPROTO; 1164 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "qp_init fail\n"); 1165 goto error; 1166 } 1167 1168 ctx_info = &iwqp->ctx_info; 1169 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; 1170 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; 1171 1172 if (rdma_protocol_roce(&iwdev->ibdev, 1)) 1173 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info); 1174 else 1175 irdma_iw_fill_and_set_qpctx_info(iwqp, 
ctx_info); 1176 1177 err_code = irdma_cqp_create_qp_cmd(iwqp); 1178 if (err_code) 1179 goto error; 1180 1181 atomic_set(&iwqp->refcnt, 1); 1182 spin_lock_init(&iwqp->lock); 1183 spin_lock_init(&iwqp->sc_qp.pfpdu.lock); 1184 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; 1185 rf->qp_table[qp_num] = iwqp; 1186 1187 if (rdma_protocol_roce(&iwdev->ibdev, 1)) { 1188 if (dev->ws_add(&iwdev->vsi, 0)) { 1189 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp); 1190 err_code = -EINVAL; 1191 goto error; 1192 } 1193 1194 irdma_qp_add_qos(&iwqp->sc_qp); 1195 spin_lock_irqsave(&iwpd->udqp_list_lock, flags); 1196 if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) 1197 list_add_tail(&iwqp->ud_list_elem, &iwpd->udqp_list); 1198 spin_unlock_irqrestore(&iwpd->udqp_list_lock, flags); 1199 } 1200 1201 if (udata) { 1202 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */ 1203 if (udata->outlen < sizeof(uresp)) { 1204 uresp.lsmm = 1; 1205 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1; 1206 } else { 1207 if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) 1208 uresp.lsmm = 1; 1209 } 1210 uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size; 1211 uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size; 1212 uresp.qp_id = qp_num; 1213 uresp.qp_caps = qp->qp_uk.qp_caps; 1214 1215 err_code = ib_copy_to_udata(udata, &uresp, 1216 min(sizeof(uresp), udata->outlen)); 1217 if (err_code) { 1218 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy_to_udata failed\n"); 1219 kc_irdma_destroy_qp(&iwqp->ibqp, udata); 1220 return ERR_PTR(err_code); 1221 } 1222 } 1223 1224 init_completion(&iwqp->free_qp); 1225 return &iwqp->ibqp; 1226 1227 error: 1228 irdma_free_qp_rsrc(iwqp); 1229 1230 return ERR_PTR(err_code); 1231 } 1232 1233 /** 1234 * irdma_destroy_qp - destroy qp 1235 * @ibqp: qp's ib pointer also to get to device's qp address 1236 * @udata: user data 1237 */ 1238 #if __FreeBSD_version >= 1400026 1239 int 1240 irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) 1241 #else 1242 int 1243 irdma_destroy_qp(struct ib_qp *ibqp) 1244 #endif 1245 { 1246 struct irdma_qp *iwqp = to_iwqp(ibqp); 1247 struct irdma_device *iwdev = iwqp->iwdev; 1248 unsigned long flags; 1249 1250 if (iwqp->sc_qp.qp_uk.destroy_pending) 1251 goto free_rsrc; 1252 iwqp->sc_qp.qp_uk.destroy_pending = true; 1253 1254 spin_lock_irqsave(&iwqp->iwpd->udqp_list_lock, flags); 1255 if (iwqp->sc_qp.qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) 1256 list_del(&iwqp->ud_list_elem); 1257 spin_unlock_irqrestore(&iwqp->iwpd->udqp_list_lock, flags); 1258 1259 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) 1260 irdma_modify_qp_to_err(&iwqp->sc_qp); 1261 1262 irdma_qp_rem_ref(&iwqp->ibqp); 1263 wait_for_completion(&iwqp->free_qp); 1264 irdma_free_lsmm_rsrc(iwqp); 1265 if (!iwdev->rf->reset && irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp)) 1266 return (iwdev->rf->rdma_ver <= IRDMA_GEN_2 && !iwqp->user_mode) ? 
0 : -ENOTRECOVERABLE; 1267 free_rsrc: 1268 if (!iwqp->user_mode) { 1269 if (iwqp->iwscq) { 1270 irdma_clean_cqes(iwqp, iwqp->iwscq); 1271 if (iwqp->iwrcq != iwqp->iwscq) 1272 irdma_clean_cqes(iwqp, iwqp->iwrcq); 1273 } 1274 } 1275 irdma_remove_push_mmap_entries(iwqp); 1276 irdma_free_qp_rsrc(iwqp); 1277 1278 return 0; 1279 } 1280 1281 /** 1282 * irdma_create_cq - create cq 1283 * @ibcq: CQ allocated 1284 * @attr: attributes for cq 1285 * @udata: user data 1286 */ 1287 #if __FreeBSD_version >= 1400026 1288 int 1289 irdma_create_cq(struct ib_cq *ibcq, 1290 const struct ib_cq_init_attr *attr, 1291 struct ib_udata *udata) 1292 #else 1293 struct ib_cq * 1294 irdma_create_cq(struct ib_device *ibdev, 1295 const struct ib_cq_init_attr *attr, 1296 struct ib_ucontext *context, 1297 struct ib_udata *udata) 1298 #endif 1299 { 1300 #define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf) 1301 #define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size) 1302 #if __FreeBSD_version >= 1400026 1303 struct ib_device *ibdev = ibcq->device; 1304 #endif 1305 struct irdma_device *iwdev = to_iwdev(ibdev); 1306 struct irdma_pci_f *rf = iwdev->rf; 1307 #if __FreeBSD_version >= 1400026 1308 struct irdma_cq *iwcq = to_iwcq(ibcq); 1309 #else 1310 struct irdma_cq *iwcq; 1311 #endif 1312 u32 cq_num = 0; 1313 struct irdma_sc_cq *cq; 1314 struct irdma_sc_dev *dev = &rf->sc_dev; 1315 struct irdma_cq_init_info info = {0}; 1316 int status; 1317 struct irdma_cqp_request *cqp_request; 1318 struct cqp_cmds_info *cqp_info; 1319 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; 1320 unsigned long flags; 1321 int err_code; 1322 int entries = attr->cqe; 1323 bool cqe_64byte_ena; 1324 1325 #if __FreeBSD_version >= 1400026 1326 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); 1327 if (err_code) 1328 return err_code; 1329 1330 if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN || 1331 udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN)) 1332 return -EINVAL; 1333 #else 1334 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev); 1335 if (err_code) 1336 return ERR_PTR(err_code); 1337 1338 if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN || 1339 udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN)) 1340 return ERR_PTR(-EINVAL); 1341 1342 iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL); 1343 if (!iwcq) 1344 return ERR_PTR(-ENOMEM); 1345 #endif 1346 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num, 1347 &rf->next_cq); 1348 if (err_code) 1349 #if __FreeBSD_version >= 1400026 1350 return err_code; 1351 #else 1352 goto error; 1353 #endif 1354 cq = &iwcq->sc_cq; 1355 cq->back_cq = iwcq; 1356 atomic_set(&iwcq->refcnt, 1); 1357 spin_lock_init(&iwcq->lock); 1358 INIT_LIST_HEAD(&iwcq->resize_list); 1359 INIT_LIST_HEAD(&iwcq->cmpl_generated); 1360 info.dev = dev; 1361 ukinfo->cq_size = max(entries, 4); 1362 ukinfo->cq_id = cq_num; 1363 cqe_64byte_ena = (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE) ? 
true : false; 1364 ukinfo->avoid_mem_cflct = cqe_64byte_ena; 1365 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; 1366 atomic_set(&iwcq->armed, 0); 1367 if (attr->comp_vector < rf->ceqs_count) 1368 info.ceq_id = attr->comp_vector; 1369 info.ceq_id_valid = true; 1370 info.ceqe_mask = 1; 1371 info.type = IRDMA_CQ_TYPE_IWARP; 1372 info.vsi = &iwdev->vsi; 1373 1374 if (udata) { 1375 struct irdma_ucontext *ucontext; 1376 struct irdma_create_cq_req req = {0}; 1377 struct irdma_cq_mr *cqmr; 1378 struct irdma_pbl *iwpbl; 1379 struct irdma_pbl *iwpbl_shadow; 1380 struct irdma_cq_mr *cqmr_shadow; 1381 1382 iwcq->user_mode = true; 1383 #if __FreeBSD_version >= 1400026 1384 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); 1385 #else 1386 ucontext = to_ucontext(context); 1387 #endif 1388 1389 if (ib_copy_from_udata(&req, udata, 1390 min(sizeof(req), udata->inlen))) { 1391 err_code = -EFAULT; 1392 goto cq_free_rsrc; 1393 } 1394 1395 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 1396 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf, 1397 &ucontext->cq_reg_mem_list); 1398 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 1399 if (!iwpbl) { 1400 err_code = -EPROTO; 1401 goto cq_free_rsrc; 1402 } 1403 iwcq->iwpbl = iwpbl; 1404 iwcq->cq_mem_size = 0; 1405 cqmr = &iwpbl->cq_mr; 1406 1407 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags & 1408 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) { 1409 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 1410 iwpbl_shadow = irdma_get_pbl((unsigned long)req.user_shadow_area, 1411 &ucontext->cq_reg_mem_list); 1412 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 1413 1414 if (!iwpbl_shadow) { 1415 err_code = -EPROTO; 1416 goto cq_free_rsrc; 1417 } 1418 iwcq->iwpbl_shadow = iwpbl_shadow; 1419 cqmr_shadow = &iwpbl_shadow->cq_mr; 1420 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr; 1421 cqmr->split = true; 1422 } else { 1423 info.shadow_area_pa = cqmr->shadow; 1424 } 1425 if (iwpbl->pbl_allocated) { 1426 info.virtual_map = true; 1427 info.pbl_chunk_size = 1; 1428 info.first_pm_pbl_idx = cqmr->cq_pbl.idx; 1429 } else { 1430 info.cq_base_pa = cqmr->cq_pbl.addr; 1431 } 1432 } else { 1433 /* Kmode allocations */ 1434 int rsize; 1435 1436 if (entries < 1 || entries > rf->max_cqe) { 1437 err_code = -EINVAL; 1438 goto cq_free_rsrc; 1439 } 1440 1441 entries++; 1442 if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 1443 entries *= 2; 1444 ukinfo->cq_size = entries; 1445 1446 if (cqe_64byte_ena) 1447 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe); 1448 else 1449 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe); 1450 iwcq->kmem.size = round_up(rsize, IRDMA_HW_PAGE_SIZE); 1451 iwcq->kmem.va = irdma_allocate_dma_mem(dev->hw, &iwcq->kmem, 1452 iwcq->kmem.size, IRDMA_HW_PAGE_SIZE); 1453 if (!iwcq->kmem.va) { 1454 err_code = -ENOMEM; 1455 goto cq_free_rsrc; 1456 } 1457 1458 iwcq->kmem_shadow.size = IRDMA_SHADOW_AREA_SIZE << 3; 1459 iwcq->kmem_shadow.va = irdma_allocate_dma_mem(dev->hw, 1460 &iwcq->kmem_shadow, 1461 iwcq->kmem_shadow.size, 1462 64); 1463 1464 if (!iwcq->kmem_shadow.va) { 1465 err_code = -ENOMEM; 1466 goto cq_free_rsrc; 1467 } 1468 info.shadow_area_pa = iwcq->kmem_shadow.pa; 1469 ukinfo->shadow_area = iwcq->kmem_shadow.va; 1470 ukinfo->cq_base = iwcq->kmem.va; 1471 info.cq_base_pa = iwcq->kmem.pa; 1472 } 1473 1474 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) 1475 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 
2, 1476 (u32)IRDMA_MAX_CQ_READ_THRESH); 1477 if (irdma_sc_cq_init(cq, &info)) { 1478 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "init cq fail\n"); 1479 err_code = -EPROTO; 1480 goto cq_free_rsrc; 1481 } 1482 1483 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); 1484 if (!cqp_request) { 1485 err_code = -ENOMEM; 1486 goto cq_free_rsrc; 1487 } 1488 cqp_info = &cqp_request->info; 1489 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE; 1490 cqp_info->post_sq = 1; 1491 cqp_info->in.u.cq_create.cq = cq; 1492 cqp_info->in.u.cq_create.check_overflow = true; 1493 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; 1494 status = irdma_handle_cqp_op(rf, cqp_request); 1495 irdma_put_cqp_request(&rf->cqp, cqp_request); 1496 if (status) { 1497 err_code = -ENOMEM; 1498 goto cq_free_rsrc; 1499 } 1500 1501 if (udata) { 1502 struct irdma_create_cq_resp resp = {0}; 1503 1504 resp.cq_id = info.cq_uk_init_info.cq_id; 1505 resp.cq_size = info.cq_uk_init_info.cq_size; 1506 if (ib_copy_to_udata(udata, &resp, 1507 min(sizeof(resp), udata->outlen))) { 1508 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy to user data\n"); 1509 err_code = -EPROTO; 1510 goto cq_destroy; 1511 } 1512 } 1513 1514 rf->cq_table[cq_num] = iwcq; 1515 init_completion(&iwcq->free_cq); 1516 1517 #if __FreeBSD_version >= 1400026 1518 return 0; 1519 #else 1520 return &iwcq->ibcq; 1521 #endif 1522 cq_destroy: 1523 irdma_cq_wq_destroy(rf, cq); 1524 cq_free_rsrc: 1525 irdma_cq_free_rsrc(rf, iwcq); 1526 #if __FreeBSD_version >= 1400026 1527 return err_code; 1528 #else 1529 error: 1530 kfree(iwcq); 1531 return ERR_PTR(err_code); 1532 #endif 1533 } 1534 1535 /** 1536 * irdma_copy_user_pgaddrs - copy user page address to pble's os locally 1537 * @iwmr: iwmr for IB's user page addresses 1538 * @pbl: ple pointer to save 1 level or 0 level pble 1539 * @level: indicated level 0, 1 or 2 1540 */ 1541 1542 void 1543 irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl, 1544 enum irdma_pble_level level) 1545 { 1546 struct ib_umem *region = iwmr->region; 1547 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 1548 int chunk_pages, entry, i; 1549 struct scatterlist *sg; 1550 u64 pg_addr = 0; 1551 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; 1552 struct irdma_pble_info *pinfo; 1553 u32 idx = 0; 1554 u32 pbl_cnt = 0; 1555 1556 pinfo = (level == PBLE_LEVEL_1) ? 
NULL : palloc->level2.leaf; 1557 for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) { 1558 chunk_pages = DIV_ROUND_UP(sg_dma_len(sg), iwmr->page_size); 1559 if (iwmr->type == IRDMA_MEMREG_TYPE_QP && !iwpbl->qp_mr.sq_page) 1560 iwpbl->qp_mr.sq_page = sg_page(sg); 1561 for (i = 0; i < chunk_pages; i++) { 1562 pg_addr = sg_dma_address(sg) + (i * iwmr->page_size); 1563 if ((entry + i) == 0) 1564 *pbl = pg_addr & iwmr->page_msk; 1565 else if (!(pg_addr & ~iwmr->page_msk)) 1566 *pbl = pg_addr; 1567 else 1568 continue; 1569 if (++pbl_cnt == palloc->total_cnt) 1570 break; 1571 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx); 1572 } 1573 } 1574 } 1575 1576 /** 1577 * irdma_destroy_ah - Destroy address handle 1578 * @ibah: pointer to address handle 1579 * @ah_flags: destroy flags 1580 */ 1581 1582 #if __FreeBSD_version >= 1400026 1583 void 1584 irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags) 1585 { 1586 struct irdma_device *iwdev = to_iwdev(ibah->device); 1587 struct irdma_ah *ah = to_iwah(ibah); 1588 1589 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY, 1590 false, NULL, ah); 1591 1592 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, 1593 ah->sc_ah.ah_info.ah_idx); 1594 } 1595 #endif 1596 1597 #if __FreeBSD_version < 1400026 1598 int 1599 irdma_destroy_ah(struct ib_ah *ibah) 1600 { 1601 struct irdma_device *iwdev = to_iwdev(ibah->device); 1602 struct irdma_ah *ah = to_iwah(ibah); 1603 1604 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY, 1605 false, NULL, ah); 1606 1607 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, 1608 ah->sc_ah.ah_info.ah_idx); 1609 1610 kfree(ah); 1611 return 0; 1612 } 1613 #endif 1614 1615 #if __FreeBSD_version >= 1400026 1616 int 1617 irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) 1618 #else 1619 int 1620 irdma_dereg_mr(struct ib_mr *ib_mr) 1621 #endif 1622 { 1623 struct irdma_mr *iwmr = to_iwmr(ib_mr); 1624 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 1625 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 1626 int ret; 1627 1628 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { 1629 if (iwmr->region) { 1630 struct irdma_ucontext *ucontext; 1631 #if __FreeBSD_version >= 1400026 1632 1633 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext); 1634 1635 #else 1636 struct ib_pd *ibpd = ib_mr->pd; 1637 1638 ucontext = to_ucontext(ibpd->uobject->context); 1639 #endif 1640 irdma_del_memlist(iwmr, ucontext); 1641 } 1642 goto done; 1643 } 1644 1645 ret = irdma_hwdereg_mr(ib_mr); 1646 if (ret) 1647 return ret; 1648 1649 irdma_free_stag(iwdev, iwmr->stag); 1650 done: 1651 if (iwpbl->pbl_allocated) 1652 irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); 1653 1654 if (iwmr->region) 1655 ib_umem_release(iwmr->region); 1656 1657 kfree(iwmr); 1658 1659 return 0; 1660 } 1661 1662 /* 1663 * irdma_rereg_user_mr - Re-Register a user memory region @ibmr: ib mem to access iwarp mr pointer @flags: bit mask to 1664 * indicate which of the attr's of MR modified @start: virtual start address @len: length of mr @virt: virtual address 1665 * @new access flags: bit mask of access flags @new_pd: ptr of pd @udata: user data 1666 */ 1667 int 1668 irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len, 1669 u64 virt, int new_access, struct ib_pd *new_pd, 1670 struct ib_udata *udata) 1671 { 1672 struct irdma_device *iwdev = to_iwdev(ib_mr->device); 1673 struct irdma_mr *iwmr = to_iwmr(ib_mr); 1674 struct irdma_pbl *iwpbl = &iwmr->iwpbl; 1675 int ret; 1676 1677 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) 1678 
return -EINVAL; 1679 1680 if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) 1681 return -EOPNOTSUPP; 1682 1683 ret = irdma_hwdereg_mr(ib_mr); 1684 if (ret) 1685 return ret; 1686 1687 if (flags & IB_MR_REREG_ACCESS) 1688 iwmr->access = new_access; 1689 1690 if (flags & IB_MR_REREG_PD) { 1691 iwmr->ibmr.pd = new_pd; 1692 iwmr->ibmr.device = new_pd->device; 1693 } 1694 1695 if (flags & IB_MR_REREG_TRANS) { 1696 if (iwpbl->pbl_allocated) { 1697 irdma_free_pble(iwdev->rf->pble_rsrc, 1698 &iwpbl->pble_alloc); 1699 iwpbl->pbl_allocated = false; 1700 } 1701 if (iwmr->region) { 1702 ib_umem_release(iwmr->region); 1703 iwmr->region = NULL; 1704 } 1705 1706 ib_mr = irdma_rereg_mr_trans(iwmr, start, len, virt, udata); 1707 if (IS_ERR(ib_mr)) 1708 return PTR_ERR(ib_mr); 1709 1710 } else { 1711 ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access); 1712 if (ret) 1713 return ret; 1714 } 1715 1716 return 0; 1717 } 1718 1719 int 1720 kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp, struct ib_qp_attr *attr, 1721 u16 *vlan_id) 1722 { 1723 int ret; 1724 union ib_gid sgid; 1725 struct ib_gid_attr sgid_attr; 1726 struct irdma_av *av = &iwqp->roce_ah.av; 1727 1728 ret = ib_get_cached_gid(iwqp->ibqp.device, attr->ah_attr.port_num, 1729 attr->ah_attr.grh.sgid_index, &sgid, 1730 &sgid_attr); 1731 if (ret) 1732 return ret; 1733 1734 if (sgid_attr.ndev) { 1735 *vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); 1736 ether_addr_copy(iwqp->ctx_info.roce_info->mac_addr, if_getlladdr(sgid_attr.ndev)); 1737 } 1738 1739 av->net_type = kc_rdma_gid_attr_network_type(sgid_attr, 1740 sgid_attr.gid_type, 1741 &sgid); 1742 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid); 1743 dev_put(sgid_attr.ndev); 1744 iwqp->sc_qp.user_pri = iwqp->ctx_info.user_pri; 1745 1746 return 0; 1747 } 1748 1749 #if __FreeBSD_version >= 1400026 1750 /** 1751 * irdma_destroy_cq - destroy cq 1752 * @ib_cq: cq pointer 1753 * @udata: user data 1754 */ 1755 void 1756 irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) 1757 { 1758 struct irdma_device *iwdev = to_iwdev(ib_cq->device); 1759 struct irdma_cq *iwcq = to_iwcq(ib_cq); 1760 struct irdma_sc_cq *cq = &iwcq->sc_cq; 1761 struct irdma_sc_dev *dev = cq->dev; 1762 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id]; 1763 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq); 1764 unsigned long flags; 1765 1766 spin_lock_irqsave(&iwcq->lock, flags); 1767 if (!list_empty(&iwcq->cmpl_generated)) 1768 irdma_remove_cmpls_list(iwcq); 1769 if (!list_empty(&iwcq->resize_list)) 1770 irdma_process_resize_list(iwcq, iwdev, NULL); 1771 spin_unlock_irqrestore(&iwcq->lock, flags); 1772 1773 irdma_cq_rem_ref(ib_cq); 1774 wait_for_completion(&iwcq->free_cq); 1775 1776 irdma_cq_wq_destroy(iwdev->rf, cq); 1777 1778 spin_lock_irqsave(&iwceq->ce_lock, flags); 1779 irdma_sc_cleanup_ceqes(cq, ceq); 1780 spin_unlock_irqrestore(&iwceq->ce_lock, flags); 1781 irdma_cq_free_rsrc(iwdev->rf, iwcq); 1782 } 1783 1784 #endif 1785 #if __FreeBSD_version < 1400026 1786 /** 1787 * irdma_destroy_cq - destroy cq 1788 * @ib_cq: cq pointer 1789 */ 1790 int 1791 irdma_destroy_cq(struct ib_cq *ib_cq) 1792 { 1793 struct irdma_device *iwdev = to_iwdev(ib_cq->device); 1794 struct irdma_cq *iwcq = to_iwcq(ib_cq); 1795 struct irdma_sc_cq *cq = &iwcq->sc_cq; 1796 struct irdma_sc_dev *dev = cq->dev; 1797 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id]; 1798 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq); 1799 unsigned long flags; 1800 1801 spin_lock_irqsave(&iwcq->lock, flags); 1802 if 
(!list_empty(&iwcq->cmpl_generated)) 1803 irdma_remove_cmpls_list(iwcq); 1804 if (!list_empty(&iwcq->resize_list)) 1805 irdma_process_resize_list(iwcq, iwdev, NULL); 1806 spin_unlock_irqrestore(&iwcq->lock, flags); 1807 1808 irdma_cq_rem_ref(ib_cq); 1809 wait_for_completion(&iwcq->free_cq); 1810 1811 irdma_cq_wq_destroy(iwdev->rf, cq); 1812 1813 spin_lock_irqsave(&iwceq->ce_lock, flags); 1814 irdma_sc_cleanup_ceqes(cq, ceq); 1815 spin_unlock_irqrestore(&iwceq->ce_lock, flags); 1816 1817 irdma_cq_free_rsrc(iwdev->rf, iwcq); 1818 kfree(iwcq); 1819 1820 return 0; 1821 } 1822 1823 #endif 1824 /** 1825 * irdma_alloc_mw - Allocate memory window 1826 * @pd: Protection domain 1827 * @type: Window type 1828 * @udata: user data pointer 1829 */ 1830 struct ib_mw * 1831 irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, 1832 struct ib_udata *udata) 1833 { 1834 struct irdma_device *iwdev = to_iwdev(pd->device); 1835 struct irdma_mr *iwmr; 1836 int err_code; 1837 u32 stag; 1838 1839 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); 1840 if (!iwmr) 1841 return ERR_PTR(-ENOMEM); 1842 1843 stag = irdma_create_stag(iwdev); 1844 if (!stag) { 1845 kfree(iwmr); 1846 return ERR_PTR(-ENOMEM); 1847 } 1848 1849 iwmr->stag = stag; 1850 iwmr->ibmw.rkey = stag; 1851 iwmr->ibmw.pd = pd; 1852 iwmr->ibmw.type = type; 1853 iwmr->ibmw.device = pd->device; 1854 1855 err_code = irdma_hw_alloc_mw(iwdev, iwmr); 1856 if (err_code) { 1857 irdma_free_stag(iwdev, stag); 1858 kfree(iwmr); 1859 return ERR_PTR(err_code); 1860 } 1861 1862 return &iwmr->ibmw; 1863 } 1864 1865 /** 1866 * kc_set_loc_seq_num_mss - Set local seq number and mss 1867 * @cm_node: cm node info 1868 */ 1869 void 1870 kc_set_loc_seq_num_mss(struct irdma_cm_node *cm_node) 1871 { 1872 struct timespec ts; 1873 1874 getnanotime(&ts); 1875 cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec; 1876 if (cm_node->iwdev->vsi.mtu > 1500 && 1877 2 * cm_node->iwdev->vsi.mtu > cm_node->iwdev->rcv_wnd) 1878 cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? 1879 (1500 - IRDMA_MTU_TO_MSS_IPV4) : 1880 (1500 - IRDMA_MTU_TO_MSS_IPV6); 1881 else 1882 cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? 
1883 (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4) : 1884 (cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6); 1885 } 1886 1887 #if __FreeBSD_version < 1400026 1888 struct irdma_vma_data { 1889 struct list_head list; 1890 struct vm_area_struct *vma; 1891 struct mutex *vma_list_mutex; /* protect the vma_list */ 1892 }; 1893 1894 /** 1895 * irdma_vma_open - 1896 * @vma: User VMA 1897 */ 1898 static void 1899 irdma_vma_open(struct vm_area_struct *vma) 1900 { 1901 vma->vm_ops = NULL; 1902 } 1903 1904 /** 1905 * irdma_vma_close - Remove vma data from vma list 1906 * @vma: User VMA 1907 */ 1908 static void 1909 irdma_vma_close(struct vm_area_struct *vma) 1910 { 1911 struct irdma_vma_data *vma_data; 1912 1913 vma_data = vma->vm_private_data; 1914 vma->vm_private_data = NULL; 1915 vma_data->vma = NULL; 1916 mutex_lock(vma_data->vma_list_mutex); 1917 list_del(&vma_data->list); 1918 mutex_unlock(vma_data->vma_list_mutex); 1919 kfree(vma_data); 1920 } 1921 1922 static const struct vm_operations_struct irdma_vm_ops = { 1923 .open = irdma_vma_open, 1924 .close = irdma_vma_close 1925 }; 1926 1927 /** 1928 * irdma_set_vma_data - Save vma data in context list 1929 * @vma: User VMA 1930 * @context: ib user context 1931 */ 1932 static int 1933 irdma_set_vma_data(struct vm_area_struct *vma, 1934 struct irdma_ucontext *context) 1935 { 1936 struct list_head *vma_head = &context->vma_list; 1937 struct irdma_vma_data *vma_entry; 1938 1939 vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL); 1940 if (!vma_entry) 1941 return -ENOMEM; 1942 1943 vma->vm_private_data = vma_entry; 1944 vma->vm_ops = &irdma_vm_ops; 1945 1946 vma_entry->vma = vma; 1947 vma_entry->vma_list_mutex = &context->vma_list_mutex; 1948 1949 mutex_lock(&context->vma_list_mutex); 1950 list_add(&vma_entry->list, vma_head); 1951 mutex_unlock(&context->vma_list_mutex); 1952 1953 return 0; 1954 } 1955 1956 /** 1957 * irdma_disassociate_ucontext - Disassociate user context 1958 * @context: ib user context 1959 */ 1960 void 1961 irdma_disassociate_ucontext(struct ib_ucontext *context) 1962 { 1963 struct irdma_ucontext *ucontext = to_ucontext(context); 1964 1965 struct irdma_vma_data *vma_data, *n; 1966 struct vm_area_struct *vma; 1967 1968 mutex_lock(&ucontext->vma_list_mutex); 1969 list_for_each_entry_safe(vma_data, n, &ucontext->vma_list, list) { 1970 vma = vma_data->vma; 1971 zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE); 1972 1973 vma->vm_ops = NULL; 1974 list_del(&vma_data->list); 1975 kfree(vma_data); 1976 } 1977 mutex_unlock(&ucontext->vma_list_mutex); 1978 } 1979 1980 int 1981 rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma, 1982 unsigned long pfn, unsigned long size, pgprot_t prot) 1983 { 1984 if (io_remap_pfn_range(vma, 1985 vma->vm_start, 1986 pfn, 1987 size, 1988 prot)) 1989 return -EAGAIN; 1990 1991 return irdma_set_vma_data(vma, to_ucontext(context)); 1992 } 1993 #else 1994 /** 1995 * irdma_disassociate_ucontext - Disassociate user context 1996 * @context: ib user context 1997 */ 1998 void 1999 irdma_disassociate_ucontext(struct ib_ucontext *context) 2000 { 2001 } 2002 #endif 2003 2004 struct ib_device * 2005 ib_device_get_by_netdev(if_t netdev, int driver_id) 2006 { 2007 struct irdma_device *iwdev; 2008 struct irdma_handler *hdl; 2009 unsigned long flags; 2010 2011 spin_lock_irqsave(&irdma_handler_lock, flags); 2012 list_for_each_entry(hdl, &irdma_handlers, list) { 2013 iwdev = hdl->iwdev; 2014 if (netdev == iwdev->netdev) { 2015 spin_unlock_irqrestore(&irdma_handler_lock, 2016 flags); 2017 return &iwdev->ibdev; 
/**
 * ib_device_get_by_netdev - Find the ib_device bound to a netdev
 * @netdev: network interface
 * @driver_id: driver id (unused in this compat implementation)
 */
struct ib_device *
ib_device_get_by_netdev(if_t netdev, int driver_id)
{
	struct irdma_device *iwdev;
	struct irdma_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&irdma_handler_lock, flags);
	list_for_each_entry(hdl, &irdma_handlers, list) {
		iwdev = hdl->iwdev;
		if (netdev == iwdev->netdev) {
			spin_unlock_irqrestore(&irdma_handler_lock, flags);
			return &iwdev->ibdev;
		}
	}
	spin_unlock_irqrestore(&irdma_handler_lock, flags);

	return NULL;
}

/**
 * ib_unregister_device_put - Compat wrapper around ib_unregister_device()
 * @device: device to unregister
 */
void
ib_unregister_device_put(struct ib_device *device)
{
	ib_unregister_device(device);
}

/**
 * irdma_query_gid_roce - Query port GID for RoCE
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
int
irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
		     union ib_gid *gid)
{
	int ret;

	ret = rdma_query_gid(ibdev, port, index, gid);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}

/**
 * irdma_modify_port - modify port attributes
 * @ibdev: device pointer from stack
 * @port: port number
 * @mask: property mask
 * @props: port attributes to modify
 */
int
irdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		  struct ib_port_modify *props)
{
	if (port > 1)
		return -EINVAL;

	return 0;
}

/**
 * irdma_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
int
irdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		 u16 *pkey)
{
	if (index >= IRDMA_PKEY_TBL_SZ)
		return -EINVAL;

	*pkey = IRDMA_DEFAULT_PKEY;
	return 0;
}

int
irdma_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
			  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

int
irdma_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
			struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;
	immutable->gid_tbl_len = 1;

	return 0;
}
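/*
 * Illustrative sketch (added commentary, not compiled): the two
 * *_port_immutable() helpers above are alternatives, and the registration
 * path is expected to install whichever one matches the device's
 * transport.  "roce_enabled" below is a hypothetical stand-in for the
 * driver's RoCE-vs-iWARP mode flag.
 */
#if 0
static void
irdma_set_port_immutable_sketch(struct irdma_device *iwdev, bool roce_enabled)
{
	iwdev->ibdev.get_port_immutable = roce_enabled ?
	    irdma_roce_port_immutable : irdma_iw_port_immutable;
}
#endif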
/**
 * irdma_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returned port attributes
 */
int
irdma_query_port(struct ib_device *ibdev, u8 port,
		 struct ib_port_attr *props)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	if_t netdev = iwdev->netdev;

	/* No need to zero out props here; it is done by the caller. */

	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(if_getmtu(netdev));
	props->lid = 1;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	if ((if_getlinkstate(netdev) == LINK_STATE_UP) &&
	    (if_getdrvflags(netdev) & IFF_DRV_RUNNING)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	ib_get_eth_speed(ibdev, port, &props->active_speed,
			 &props->active_width);

	if (rdma_protocol_roce(ibdev, 1)) {
		props->gid_tbl_len = 32;
		kc_set_props_ip_gid_caps(props);
		props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
	} else {
		props->gid_tbl_len = 1;
	}
	props->qkey_viol_cntr = 0;
	props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
	props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;

	return 0;
}

static const char *const irdma_hw_stat_names[] = {
	/* gen1 - 32-bit */
	[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
	/* gen1 - 64-bit */
	[IRDMA_HW_STAT_INDEX_IP4RXOCTS] = "ip4InOctets",
	[IRDMA_HW_STAT_INDEX_IP4RXPKTS] = "ip4InPkts",
	[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = "ip4InReasmRqd",
	[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = "ip4InMcastPkts",
	[IRDMA_HW_STAT_INDEX_IP4TXOCTS] = "ip4OutOctets",
	[IRDMA_HW_STAT_INDEX_IP4TXPKTS] = "ip4OutPkts",
	[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = "ip4OutSegRqd",
	[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = "ip4OutMcastPkts",
	[IRDMA_HW_STAT_INDEX_IP6RXOCTS] = "ip6InOctets",
	[IRDMA_HW_STAT_INDEX_IP6RXPKTS] = "ip6InPkts",
	[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = "ip6InReasmRqd",
	[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = "ip6InMcastPkts",
	[IRDMA_HW_STAT_INDEX_IP6TXOCTS] = "ip6OutOctets",
	[IRDMA_HW_STAT_INDEX_IP6TXPKTS] = "ip6OutPkts",
	[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = "ip6OutSegRqd",
	[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = "ip6OutMcastPkts",
	[IRDMA_HW_STAT_INDEX_RDMARXRDS] = "InRdmaReads",
	[IRDMA_HW_STAT_INDEX_RDMARXSNDS] = "InRdmaSends",
	[IRDMA_HW_STAT_INDEX_RDMARXWRS] = "InRdmaWrites",
	[IRDMA_HW_STAT_INDEX_RDMATXRDS] = "OutRdmaReads",
	[IRDMA_HW_STAT_INDEX_RDMATXSNDS] = "OutRdmaSends",
	[IRDMA_HW_STAT_INDEX_RDMATXWRS] = "OutRdmaWrites",
	[IRDMA_HW_STAT_INDEX_RDMAVBND] = "RdmaBnd",
	[IRDMA_HW_STAT_INDEX_RDMAVINV] = "RdmaInv",

	/* gen2 - 32-bit */
	[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
	[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
	[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
	/* gen2 - 64-bit */
	[IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = "ip4InMcastOctets",
	[IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = "ip4OutMcastOctets",
	[IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = "ip6InMcastOctets",
	[IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = "ip6OutMcastOctets",
	[IRDMA_HW_STAT_INDEX_UDPRXPKTS] = "RxUDP",
	[IRDMA_HW_STAT_INDEX_UDPTXPKTS] = "TxUDP",
	[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = "RxECNMrkd",
	[IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "RetransSegs",
	[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "InOptErrors",
	[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "InProtoErrors",
	[IRDMA_HW_STAT_INDEX_TCPRXSEGS] = "InSegs",
	[IRDMA_HW_STAT_INDEX_TCPTXSEG] = "OutSegs",
};

/**
 * irdma_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */
struct rdma_hw_stats *
irdma_alloc_hw_stats(struct ib_device *ibdev,
		     u8 port_num)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
	int num_counters = dev->hw_attrs.max_stat_idx;
	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

	return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
					  lifespan);
}

/**
 * irdma_get_hw_stats - Populate the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */
int
irdma_get_hw_stats(struct ib_device *ibdev,
		   struct rdma_hw_stats *stats, u8 port_num,
		   int index)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);
	struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;

	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
		irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev,
					   iwdev->vsi.pestat, true);

	memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);

	return stats->num_counters;
}
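/*
 * Note (added commentary): irdma_get_hw_stats() copies the counters with a
 * single memcpy, which assumes the u64 counters in struct
 * irdma_dev_hw_stats are laid out in exactly the order named by
 * irdma_hw_stat_names[]; a new counter would have to be added to both in
 * the same position.
 */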
/**
 * irdma_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
int
irdma_query_gid(struct ib_device *ibdev, u8 port, int index,
		union ib_gid *gid)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	ether_addr_copy(gid->raw, if_getlladdr(iwdev->netdev));

	return 0;
}

enum rdma_link_layer
irdma_get_link_layer(struct ib_device *ibdev,
		     u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

inline enum ib_mtu
ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
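/*
 * Worked example (added commentary): ib_mtu_int_to_enum() rounds the netdev
 * MTU down to the nearest IB MTU, so irdma_query_port() reports a standard
 * 1500-byte Ethernet MTU as IB_MTU_1024 and a 9000-byte jumbo MTU as
 * IB_MTU_4096.
 */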
inline void
kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev)
{
	iwdev->ibdev.uverbs_cmd_mask |=
	    BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
	    BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST);
}

inline void
kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
{
	iwdev->ibdev.uverbs_cmd_mask =
	    BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
	    BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
	    BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
	    BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
	    BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
	    BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
	    BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ) |
	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
	    BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
	    BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
	    BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
	    BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
	    BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
	    BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
	    BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
	    BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
	    BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
	    BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
	iwdev->ibdev.uverbs_ex_cmd_mask =
	    BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) |
	    BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

	if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
		iwdev->ibdev.uverbs_ex_cmd_mask |=
		    BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ);
}

int
ib_get_eth_speed(struct ib_device *ibdev, u32 port_num, u8 *speed, u8 *width)
{
	if_t netdev = ibdev->get_netdev(ibdev, port_num);
	u32 netdev_speed;

	if (!netdev)
		return -ENODEV;

	netdev_speed = if_getbaudrate(netdev);
	dev_put(netdev);
	if (netdev_speed <= SPEED_1000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_SDR;
	} else if (netdev_speed <= SPEED_10000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_FDR10;
	} else if (netdev_speed <= SPEED_20000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_DDR;
	} else if (netdev_speed <= SPEED_25000) {
		*width = IB_WIDTH_1X;
		*speed = IB_SPEED_EDR;
	} else if (netdev_speed <= SPEED_40000) {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_FDR10;
	} else {
		*width = IB_WIDTH_4X;
		*speed = IB_SPEED_EDR;
	}

	return 0;
}
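/*
 * Worked example (added commentary), assuming if_getbaudrate() and the
 * SPEED_* constants use the same units: a 25 Gb/s link (above SPEED_20000,
 * at or below SPEED_25000) is reported as IB_WIDTH_1X with IB_SPEED_EDR
 * (~25 Gb/s), while a 100 Gb/s link exceeds SPEED_40000 and falls into the
 * final branch, reporting IB_WIDTH_4X with IB_SPEED_EDR (~100 Gb/s).
 */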