// SPDX-License-Identifier: GPL-2.0

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/*          Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */

#include <linux/vmalloc.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"

static void assemble_qbuf_mtt_for_cmd(struct erdma_mem *mem, u32 *cfg,
				      u64 *addr0, u64 *addr1)
{
	struct erdma_mtt *mtt = mem->mtt;

	if (mem->mtt_nents > ERDMA_MAX_INLINE_MTT_ENTRIES) {
		*addr0 = mtt->buf_dma;
		*cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
				   ERDMA_MR_MTT_1LEVEL);
	} else {
		*addr0 = mtt->buf[0];
		memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
		*cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
				   ERDMA_MR_MTT_0LEVEL);
	}
}

static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
{
	struct erdma_dev *dev = to_edev(qp->ibqp.device);
	struct erdma_pd *pd = to_epd(qp->ibqp.pd);
	struct erdma_cmdq_create_qp_req req;
	struct erdma_uqp *user_qp;
	u64 resp0, resp1;
	int err;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_CREATE_QP);

	req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK,
			      ilog2(qp->attrs.sq_size)) |
		   FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp));
	req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK,
			      ilog2(qp->attrs.rq_size)) |
		   FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);

	if (qp->ibqp.qp_type == IB_QPT_RC)
		req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
				      ERDMA_QPT_RC);
	else
		req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK,
				      ERDMA_QPT_UD);

	if (rdma_is_kernel_res(&qp->ibqp.res)) {
		u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;

		req.sq_cqn_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
				   pgsz_range) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
		req.rq_cqn_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
				   pgsz_range) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);

		req.sq_mtt_cfg =
			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
				   ERDMA_MR_MTT_0LEVEL);
		req.rq_mtt_cfg = req.sq_mtt_cfg;

		req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
		req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
		req.sq_dbrec_dma = qp->kern_qp.sq_dbrec_dma;
		req.rq_dbrec_dma = qp->kern_qp.rq_dbrec_dma;
	} else {
		user_qp = &qp->user_qp;
		req.sq_cqn_mtt_cfg = FIELD_PREP(
			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
			ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
		req.sq_cqn_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);

		req.rq_cqn_mtt_cfg = FIELD_PREP(
			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
			ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
		req.rq_cqn_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);

		req.sq_mtt_cfg = user_qp->sq_mem.page_offset;
		req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
					     user_qp->sq_mem.mtt_nents);

		req.rq_mtt_cfg = user_qp->rq_mem.page_offset;
		req.rq_mtt_cfg |=
			FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
				   user_qp->rq_mem.mtt_nents);

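		/*
		 * Fill the queue buffer addresses: either the inline
		 * (level-0) MTT entries or the level-1 MTT base address,
		 * depending on how many MTT entries each queue needs.
		 */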
		assemble_qbuf_mtt_for_cmd(&user_qp->sq_mem, &req.sq_mtt_cfg,
					  &req.sq_buf_addr, req.sq_mtt_entry);
		assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
					  &req.rq_buf_addr, req.rq_mtt_entry);

		req.sq_dbrec_dma = user_qp->sq_dbrec_dma;
		req.rq_dbrec_dma = user_qp->rq_dbrec_dma;

		if (uctx->ext_db.enable) {
			req.sq_cqn_mtt_cfg |=
				FIELD_PREP(ERDMA_CMD_CREATE_QP_DB_CFG_MASK, 1);
			req.db_cfg =
				FIELD_PREP(ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK,
					   uctx->ext_db.sdb_off) |
				FIELD_PREP(ERDMA_CMD_CREATE_QP_RQDB_CFG_MASK,
					   uctx->ext_db.rdb_off);
		}
	}

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1,
				  true);
	if (!err && erdma_device_iwarp(dev))
		qp->attrs.iwarp.cookie =
			FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0);

	return err;
}

static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
{
	struct erdma_pd *pd = to_epd(mr->ibmr.pd);
	u32 mtt_level = ERDMA_MR_MTT_0LEVEL;
	struct erdma_cmdq_reg_mr_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);

	if (mr->type == ERDMA_MR_TYPE_FRMR ||
	    mr->mem.page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES) {
		if (mr->mem.mtt->continuous) {
			req.phy_addr[0] = mr->mem.mtt->buf_dma;
			mtt_level = ERDMA_MR_MTT_1LEVEL;
		} else {
			req.phy_addr[0] = mr->mem.mtt->dma_addrs[0];
			mtt_level = mr->mem.mtt->level;
		}
	} else if (mr->type != ERDMA_MR_TYPE_DMA) {
		memcpy(req.phy_addr, mr->mem.mtt->buf,
		       MTT_SIZE(mr->mem.page_cnt));
	}

	req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
		   FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
		   FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
	req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) |
		   FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) |
		   FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access);
	req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
			      ilog2(mr->mem.page_size)) |
		   FIELD_PREP(ERDMA_CMD_REGMR_MTT_LEVEL_MASK, mtt_level) |
		   FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);

	if (mr->type == ERDMA_MR_TYPE_DMA)
		goto post_cmd;

	if (mr->type == ERDMA_MR_TYPE_NORMAL) {
		req.start_va = mr->mem.va;
		req.size = mr->mem.len;
	}

	if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {
		req.cfg0 |= FIELD_PREP(ERDMA_CMD_MR_VERSION_MASK, 1);
		req.cfg2 |= FIELD_PREP(ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK,
				       PAGE_SHIFT - ERDMA_HW_PAGE_SHIFT);
		req.size_h = upper_32_bits(mr->mem.len);
		req.mtt_cnt_h = mr->mem.page_cnt >> 20;
	}

post_cmd:
	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				   true);
}

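/*
 * Build and post the CREATE_CQ command. A kernel CQ uses a single physically
 * contiguous queue buffer, while a user CQ is described by its MTT and may
 * carry the extended doorbell configuration of the owning context.
 */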
static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
{
	struct erdma_dev *dev = to_edev(cq->ibcq.device);
	struct erdma_cmdq_create_cq_req req;
	struct erdma_mem *mem;
	u32 page_size;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_CREATE_CQ);

	req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) |
		   FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth));
	req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn);

	if (rdma_is_kernel_res(&cq->ibcq.res)) {
		page_size = SZ_32M;
		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
				       ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
		req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
		req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);

		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
			    FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
				       ERDMA_MR_MTT_0LEVEL);

		req.first_page_offset = 0;
		req.cq_dbrec_dma = cq->kern_cq.dbrec_dma;
	} else {
		mem = &cq->user_cq.qbuf_mem;
		req.cfg0 |=
			FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
				   ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT);
		if (mem->mtt_nents == 1) {
			req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
			req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
			req.cfg1 |=
				FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
					   ERDMA_MR_MTT_0LEVEL);
		} else {
			req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
			req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
			req.cfg1 |=
				FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
					   ERDMA_MR_MTT_1LEVEL);
		}
		req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
				       mem->mtt_nents);

		req.first_page_offset = mem->page_offset;
		req.cq_dbrec_dma = cq->user_cq.dbrec_dma;

		if (uctx->ext_db.enable) {
			req.cfg1 |= FIELD_PREP(
				ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK, 1);
			req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_DB_CFG_MASK,
					      uctx->ext_db.cdb_off);
		}
	}

	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				   true);
}

static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
{
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&res_cb->lock, flags);
	idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap,
				 res_cb->next_alloc_idx);
	if (idx == res_cb->max_cap) {
		idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap);
		if (idx == res_cb->max_cap) {
			res_cb->next_alloc_idx = 1;
			spin_unlock_irqrestore(&res_cb->lock, flags);
			return -ENOSPC;
		}
	}

	set_bit(idx, res_cb->bitmap);
	res_cb->next_alloc_idx = idx + 1;
	spin_unlock_irqrestore(&res_cb->lock, flags);

	return idx;
}

static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx)
{
	unsigned long flags;
	u32 used;

	spin_lock_irqsave(&res_cb->lock, flags);
	used = __test_and_clear_bit(idx, res_cb->bitmap);
	spin_unlock_irqrestore(&res_cb->lock, flags);
	WARN_ON(!used);
}

static struct rdma_user_mmap_entry *
erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address,
			     u32 size, u8 mmap_flag, u64 *mmap_offset)
{
	struct erdma_user_mmap_entry *entry =
		kzalloc(sizeof(*entry), GFP_KERNEL);
	int ret;

	if (!entry)
		return NULL;

	entry->address = (u64)address;
	entry->mmap_flag = mmap_flag;

	size = PAGE_ALIGN(size);

	ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry,
					  size);
	if (ret) {
		kfree(entry);
		return NULL;
	}

	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
		       struct ib_udata *unused)
{
	struct erdma_dev *dev = to_edev(ibdev);

	memset(attr, 0, sizeof(*attr));

	attr->max_mr_size = dev->attrs.max_mr_size;
	attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
	attr->vendor_part_id = dev->pdev->device;
	attr->hw_ver = dev->pdev->revision;
	attr->max_qp = dev->attrs.max_qp - 1;
	attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
	attr->max_qp_rd_atom = dev->attrs.max_ord;
	attr->max_qp_init_rd_atom = dev->attrs.max_ird;
	attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird;
	attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	ibdev->local_dma_lkey = dev->attrs.local_dma_key;
	attr->max_send_sge = dev->attrs.max_send_sge;
	attr->max_recv_sge = dev->attrs.max_recv_sge;
	attr->max_sge_rd = dev->attrs.max_sge_rd;
	attr->max_cq = dev->attrs.max_cq - 1;
	attr->max_cqe = dev->attrs.max_cqe;
	attr->max_mr = dev->attrs.max_mr;
	attr->max_pd = dev->attrs.max_pd;
	attr->max_mw = dev->attrs.max_mw;
	attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA;
	attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT;

	if (erdma_device_rocev2(dev)) {
		attr->max_pkeys = ERDMA_MAX_PKEYS;
		attr->max_ah = dev->attrs.max_ah;
	}

	if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC)
		attr->atomic_cap = IB_ATOMIC_GLOB;

	attr->fw_ver = dev->attrs.fw_version;

	if (dev->netdev)
		addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
				    dev->netdev->dev_addr);

	return 0;
}

int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx,
		    union ib_gid *gid)
{
	struct erdma_dev *dev = to_edev(ibdev);

	memset(gid, 0, sizeof(*gid));
	ether_addr_copy(gid->raw, dev->attrs.peer_addr);

	return 0;
}

int erdma_query_port(struct ib_device *ibdev, u32 port,
		     struct ib_port_attr *attr)
{
	struct erdma_dev *dev = to_edev(ibdev);
	struct net_device *ndev = dev->netdev;

	memset(attr, 0, sizeof(*attr));

	if (erdma_device_iwarp(dev)) {
		attr->gid_tbl_len = 1;
	} else {
		attr->gid_tbl_len = dev->attrs.max_gid;
		attr->ip_gids = true;
		attr->pkey_tbl_len = ERDMA_MAX_PKEYS;
	}

	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->max_msg_sz = -1;

	if (!ndev)
		goto out;

	ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width);
	attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
	attr->state = ib_get_curr_port_state(ndev);

out:
	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	return 0;
}

int erdma_get_port_immutable(struct ib_device *ibdev, u32 port,
			     struct ib_port_immutable *port_immutable)
{
	struct erdma_dev *dev = to_edev(ibdev);

	if (erdma_device_iwarp(dev)) {
		port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
		port_immutable->gid_tbl_len = 1;
	} else {
		port_immutable->core_cap_flags =
			RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
		port_immutable->max_mad_size = IB_MGMT_MAD_SIZE;
		port_immutable->gid_tbl_len = dev->attrs.max_gid;
		port_immutable->pkey_tbl_len = ERDMA_MAX_PKEYS;
	}

	return 0;
}

int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct erdma_pd *pd = to_epd(ibpd);
	struct erdma_dev *dev = to_edev(ibpd->device);
	int pdn;

	pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]);
	if (pdn < 0)
		return pdn;

	pd->pdn = pdn;

	return 0;
}

int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct erdma_pd *pd = to_epd(ibpd);
	struct erdma_dev *dev = to_edev(ibpd->device);

	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn);

	return 0;
}

static void erdma_flush_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct erdma_qp *qp =
		container_of(dwork, struct erdma_qp, reflush_dwork);
	struct erdma_cmdq_reflush_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_REFLUSH);
	req.qpn = QP_ID(qp);
	req.sq_pi = qp->kern_qp.sq_pi;
	req.rq_pi = qp->kern_qp.rq_pi;
	erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL,
			    true);
}

static int erdma_qp_validate_cap(struct erdma_dev *dev,
				 struct ib_qp_init_attr *attrs)
{
	if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) ||
	    (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) ||
	    (attrs->cap.max_send_sge > dev->attrs.max_send_sge) ||
	    (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) ||
	    (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) ||
	    !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) {
		return -EINVAL;
	}

	return 0;
}

static int erdma_qp_validate_attr(struct erdma_dev *dev,
				  struct ib_qp_init_attr *attrs)
{
	if (erdma_device_iwarp(dev) && attrs->qp_type != IB_QPT_RC)
		return -EOPNOTSUPP;

	if (erdma_device_rocev2(dev) && attrs->qp_type != IB_QPT_RC &&
	    attrs->qp_type != IB_QPT_UD && attrs->qp_type != IB_QPT_GSI)
		return -EOPNOTSUPP;

	if (attrs->srq)
		return -EOPNOTSUPP;

	if (!attrs->send_cq || !attrs->recv_cq)
		return -EOPNOTSUPP;

	return 0;
}

static void free_kernel_qp(struct erdma_qp *qp)
{
	struct erdma_dev *dev = qp->dev;

	vfree(qp->kern_qp.swr_tbl);
	vfree(qp->kern_qp.rwr_tbl);

	if (qp->kern_qp.sq_buf)
		dma_free_coherent(&dev->pdev->dev,
				  qp->attrs.sq_size << SQEBB_SHIFT,
				  qp->kern_qp.sq_buf,
				  qp->kern_qp.sq_buf_dma_addr);

	if (qp->kern_qp.sq_dbrec)
		dma_pool_free(dev->db_pool, qp->kern_qp.sq_dbrec,
			      qp->kern_qp.sq_dbrec_dma);

	if (qp->kern_qp.rq_buf)
		dma_free_coherent(&dev->pdev->dev,
				  qp->attrs.rq_size << RQE_SHIFT,
				  qp->kern_qp.rq_buf,
				  qp->kern_qp.rq_buf_dma_addr);

	if (qp->kern_qp.rq_dbrec)
		dma_pool_free(dev->db_pool, qp->kern_qp.rq_dbrec,
			      qp->kern_qp.rq_dbrec_dma);
}

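/*
 * Allocate the SQ/RQ buffers, doorbell records and WR bookkeeping tables of a
 * kernel QP. On any failure, everything allocated so far is released via
 * free_kernel_qp().
 */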
static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
			  struct ib_qp_init_attr *attrs)
{
	struct erdma_kqp *kqp = &qp->kern_qp;
	int size;

	if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
		kqp->sig_all = 1;

	kqp->sq_pi = 0;
	kqp->sq_ci = 0;
	kqp->rq_pi = 0;
	kqp->rq_ci = 0;
	kqp->hw_sq_db =
		dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
	kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;

	kqp->swr_tbl = vmalloc_array(qp->attrs.sq_size, sizeof(u64));
	kqp->rwr_tbl = vmalloc_array(qp->attrs.rq_size, sizeof(u64));
	if (!kqp->swr_tbl || !kqp->rwr_tbl)
		goto err_out;

	size = qp->attrs.sq_size << SQEBB_SHIFT;
	kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
					 &kqp->sq_buf_dma_addr, GFP_KERNEL);
	if (!kqp->sq_buf)
		goto err_out;

	kqp->sq_dbrec =
		dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->sq_dbrec_dma);
	if (!kqp->sq_dbrec)
		goto err_out;

	size = qp->attrs.rq_size << RQE_SHIFT;
	kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
					 &kqp->rq_buf_dma_addr, GFP_KERNEL);
	if (!kqp->rq_buf)
		goto err_out;

	kqp->rq_dbrec =
		dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->rq_dbrec_dma);
	if (!kqp->rq_dbrec)
		goto err_out;

	return 0;

err_out:
	free_kernel_qp(qp);
	return -ENOMEM;
}

static void erdma_fill_bottom_mtt(struct erdma_dev *dev, struct erdma_mem *mem)
{
	struct erdma_mtt *mtt = mem->mtt;
	struct ib_block_iter biter;
	u32 idx = 0;

	while (mtt->low_level)
		mtt = mtt->low_level;

	rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
		mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
}

static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
					       size_t size)
{
	struct erdma_mtt *mtt;

	mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->size = size;
	mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
	if (!mtt->buf)
		goto err_free_mtt;

	mtt->continuous = true;
	mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
		goto err_free_mtt_buf;

	return mtt;

err_free_mtt_buf:
	kfree(mtt->buf);

err_free_mtt:
	kfree(mtt);

	return ERR_PTR(-ENOMEM);
}

static void erdma_unmap_page_list(struct erdma_dev *dev, dma_addr_t *pg_dma,
				  u32 npages)
{
	u32 i;

	for (i = 0; i < npages; i++)
		dma_unmap_page(&dev->pdev->dev, pg_dma[i], PAGE_SIZE,
			       DMA_TO_DEVICE);
}

static void erdma_destroy_mtt_buf_dma_addrs(struct erdma_dev *dev,
					    struct erdma_mtt *mtt)
{
	erdma_unmap_page_list(dev, mtt->dma_addrs, mtt->npages);
	vfree(mtt->dma_addrs);
}

static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
				      struct erdma_mtt *mtt)
{
	erdma_destroy_mtt_buf_dma_addrs(dev, mtt);
	vfree(mtt->buf);
	kfree(mtt);
}

static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
				  struct erdma_mtt *low_mtt)
{
	dma_addr_t *pg_addr = mtt->buf;
	u32 i;

	for (i = 0; i < low_mtt->npages; i++)
		pg_addr[i] = low_mtt->dma_addrs[i];
}

static u32 vmalloc_to_dma_addrs(struct erdma_dev *dev, dma_addr_t **dma_addrs,
				void *buf, u64 len)
{
	dma_addr_t *pg_dma;
	struct page *pg;
	u32 npages, i;
	void *addr;

	npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >>
		 PAGE_SHIFT;
	pg_dma = vzalloc(npages * sizeof(dma_addr_t));
	if (!pg_dma)
		return 0;

	addr = buf;
	for (i = 0; i < npages; i++) {
		pg = vmalloc_to_page(addr);
		if (!pg)
			goto err;

		pg_dma[i] = dma_map_page(&dev->pdev->dev, pg, 0, PAGE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, pg_dma[i]))
			goto err;

		addr += PAGE_SIZE;
	}

	*dma_addrs = pg_dma;

	return npages;
err:
	erdma_unmap_page_list(dev, pg_dma, i);
	vfree(pg_dma);

	return 0;
}

static int erdma_create_mtt_buf_dma_addrs(struct erdma_dev *dev,
					  struct erdma_mtt *mtt)
{
	dma_addr_t *addrs;
	u32 npages;

	/* Fail if the buffer is not page aligned. */
	if ((uintptr_t)mtt->buf & ~PAGE_MASK)
		return -EINVAL;

	npages = vmalloc_to_dma_addrs(dev, &addrs, mtt->buf, mtt->size);
	if (!npages)
		return -ENOMEM;

	mtt->dma_addrs = addrs;
	mtt->npages = npages;

	return 0;
}

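/*
 * A scatter MTT is kept in vmalloc memory; its backing pages are DMA-mapped
 * one by one, so the table itself does not need to be physically contiguous.
 */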
static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
						  size_t size)
{
	struct erdma_mtt *mtt;
	int ret = -ENOMEM;

	mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
	if (!mtt)
		return ERR_PTR(-ENOMEM);

	mtt->size = ALIGN(size, PAGE_SIZE);
	mtt->buf = vzalloc(mtt->size);
	mtt->continuous = false;
	if (!mtt->buf)
		goto err_free_mtt;

	ret = erdma_create_mtt_buf_dma_addrs(dev, mtt);
	if (ret)
		goto err_free_mtt_buf;

	ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, npages:%u\n",
		  mtt->size, mtt->npages);

	return mtt;

err_free_mtt_buf:
	vfree(mtt->buf);

err_free_mtt:
	kfree(mtt);

	return ERR_PTR(ret);
}

static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
					  bool force_continuous)
{
	struct erdma_mtt *mtt, *tmp_mtt;
	int ret, level = 0;

	ibdev_dbg(&dev->ibdev, "create_mtt, size:%lu, force cont:%d\n", size,
		  force_continuous);

	if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_MTT_VA))
		force_continuous = true;

	if (force_continuous)
		return erdma_create_cont_mtt(dev, size);

	mtt = erdma_create_scatter_mtt(dev, size);
	if (IS_ERR(mtt))
		return mtt;
	level = 1;

	/* Converge the MTT table: stack levels until the top fits one page. */
	while (mtt->npages != 1 && level <= 3) {
		tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->npages));
		if (IS_ERR(tmp_mtt)) {
			ret = PTR_ERR(tmp_mtt);
			goto err_free_mtt;
		}
		erdma_init_middle_mtt(tmp_mtt, mtt);
		tmp_mtt->low_level = mtt;
		mtt = tmp_mtt;
		level++;
	}

	if (level > 3) {
		ret = -ENOMEM;
		goto err_free_mtt;
	}

	mtt->level = level;
	ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
		  mtt->level, mtt->dma_addrs[0]);

	return mtt;
err_free_mtt:
	while (mtt) {
		tmp_mtt = mtt->low_level;
		erdma_destroy_scatter_mtt(dev, mtt);
		mtt = tmp_mtt;
	}

	return ERR_PTR(ret);
}

static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
{
	struct erdma_mtt *tmp_mtt;

	if (mtt->continuous) {
		dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
				 DMA_TO_DEVICE);
		kfree(mtt->buf);
		kfree(mtt);
	} else {
		while (mtt) {
			tmp_mtt = mtt->low_level;
			erdma_destroy_scatter_mtt(dev, mtt);
			mtt = tmp_mtt;
		}
	}
}

static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
			   u64 start, u64 len, int access, u64 virt,
			   unsigned long req_page_size, bool force_continuous)
{
	int ret = 0;

	mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
	if (IS_ERR(mem->umem)) {
		ret = PTR_ERR(mem->umem);
		mem->umem = NULL;
		return ret;
	}

	mem->va = virt;
	mem->len = len;
	mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt);
	mem->page_offset = start & (mem->page_size - 1);
	mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
	mem->page_cnt = mem->mtt_nents;
	mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
				    force_continuous);
	if (IS_ERR(mem->mtt)) {
		ret = PTR_ERR(mem->mtt);
		goto error_ret;
	}

	erdma_fill_bottom_mtt(dev, mem);

	return 0;

error_ret:
	if (mem->umem) {
		ib_umem_release(mem->umem);
		mem->umem = NULL;
	}

	return ret;
}

static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
{
	if (mem->mtt)
		erdma_destroy_mtt(dev, mem->mtt);

	if (mem->umem) {
		ib_umem_release(mem->umem);
		mem->umem = NULL;
	}
}

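/*
 * Doorbell records of a user context are grouped into pinned, refcounted
 * pages: reuse the page that already covers @dbrecords_va if one exists,
 * otherwise pin a new one, and return the DMA address of the record.
 */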
static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx,
				    u64 dbrecords_va,
				    struct erdma_user_dbrecords_page **dbr_page,
				    dma_addr_t *dma_addr)
{
	struct erdma_user_dbrecords_page *page = NULL;
	int rv = 0;

	mutex_lock(&ctx->dbrecords_page_mutex);

	list_for_each_entry(page, &ctx->dbrecords_page_list, list)
		if (page->va == (dbrecords_va & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		rv = -ENOMEM;
		goto out;
	}

	page->va = (dbrecords_va & PAGE_MASK);
	page->refcnt = 0;

	page->umem = ib_umem_get(ctx->ibucontext.device,
				 dbrecords_va & PAGE_MASK, PAGE_SIZE, 0);
	if (IS_ERR(page->umem)) {
		rv = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &ctx->dbrecords_page_list);

found:
	*dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
		    (dbrecords_va & ~PAGE_MASK);
	*dbr_page = page;
	page->refcnt++;

out:
	mutex_unlock(&ctx->dbrecords_page_mutex);
	return rv;
}

static void
erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
			   struct erdma_user_dbrecords_page **dbr_page)
{
	if (!ctx || !(*dbr_page))
		return;

	mutex_lock(&ctx->dbrecords_page_mutex);
	if (--(*dbr_page)->refcnt == 0) {
		list_del(&(*dbr_page)->list);
		ib_umem_release((*dbr_page)->umem);
		kfree(*dbr_page);
	}

	*dbr_page = NULL;
	mutex_unlock(&ctx->dbrecords_page_mutex);
}

static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
			u64 va, u32 len, u64 dbrec_va)
{
	dma_addr_t dbrec_dma;
	u32 rq_offset;
	int ret;

	if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
		   qp->attrs.rq_size * RQE_SIZE))
		return -EINVAL;

	ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va,
			      qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
			      (SZ_1M - SZ_4K), true);
	if (ret)
		return ret;

	rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
	qp->user_qp.rq_offset = rq_offset;

	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
			      qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
			      (SZ_1M - SZ_4K), true);
	if (ret)
		goto put_sq_mtt;

	ret = erdma_map_user_dbrecords(uctx, dbrec_va,
				       &qp->user_qp.user_dbr_page,
				       &dbrec_dma);
	if (ret)
		goto put_rq_mtt;

	qp->user_qp.sq_dbrec_dma = dbrec_dma;
	qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE;

	return 0;

put_rq_mtt:
	put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);

put_sq_mtt:
	put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);

	return ret;
}

static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
{
	put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
	put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
	erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
}

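/*
 * Create a QP: reserve a QPN (QPN 1 is fixed for the GSI QP), size the SQ and
 * RQ, set up either user-provided or kernel queue buffers, and finally issue
 * the CREATE_QP command to the device.
 */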
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
		    struct ib_udata *udata)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	struct erdma_dev *dev = to_edev(ibqp->device);
	struct erdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	struct erdma_ureq_create_qp ureq;
	struct erdma_uresp_create_qp uresp;
	void *old_entry;
	int ret = 0;

	ret = erdma_qp_validate_cap(dev, attrs);
	if (ret)
		goto err_out;

	ret = erdma_qp_validate_attr(dev, attrs);
	if (ret)
		goto err_out;

	qp->scq = to_ecq(attrs->send_cq);
	qp->rcq = to_ecq(attrs->recv_cq);
	qp->dev = dev;
	qp->attrs.cc = dev->attrs.cc;

	init_rwsem(&qp->state_lock);
	kref_init(&qp->ref);
	init_completion(&qp->safe_free);

	if (qp->ibqp.qp_type == IB_QPT_GSI) {
		old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
		if (xa_is_err(old_entry))
			ret = xa_err(old_entry);
	} else {
		ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
				      XA_LIMIT(1, dev->attrs.max_qp - 1),
				      &dev->next_alloc_qpn, GFP_KERNEL);
	}

	if (ret < 0) {
		ret = -ENOMEM;
		goto err_out;
	}

	qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr *
					       ERDMA_MAX_WQEBB_PER_SQE);
	qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr);

	if (uctx) {
		ret = ib_copy_from_udata(&ureq, udata,
					 min(sizeof(ureq), udata->inlen));
		if (ret)
			goto err_out_xa;

		ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len,
				   ureq.db_record_va);
		if (ret)
			goto err_out_xa;

		memset(&uresp, 0, sizeof(uresp));

		uresp.num_sqe = qp->attrs.sq_size;
		uresp.num_rqe = qp->attrs.rq_size;
		uresp.qp_id = QP_ID(qp);
		uresp.rq_offset = qp->user_qp.rq_offset;

		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err_out_cmd;
	} else {
		init_kernel_qp(dev, qp, attrs);
	}

	qp->attrs.max_send_sge = attrs->cap.max_send_sge;
	qp->attrs.max_recv_sge = attrs->cap.max_recv_sge;

	if (erdma_device_iwarp(qp->dev))
		qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE;
	else
		qp->attrs.rocev2.state = ERDMA_QPS_ROCEV2_RESET;

	INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);

	ret = create_qp_cmd(uctx, qp);
	if (ret)
		goto err_out_cmd;

	spin_lock_init(&qp->lock);

	return 0;

err_out_cmd:
	if (uctx)
		free_user_qp(qp, uctx);
	else
		free_kernel_qp(qp);
err_out_xa:
	xa_erase(&dev->qp_xa, QP_ID(qp));
err_out:
	return ret;
}

static int erdma_create_stag(struct erdma_dev *dev, u32 *stag)
{
	int stag_idx;

	stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]);
	if (stag_idx < 0)
		return stag_idx;

	/* For now, we always let key field be zero. */
	*stag = (stag_idx << 8);

	return 0;
}

struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	struct erdma_dev *dev = to_edev(ibpd->device);
	struct erdma_mr *mr;
	u32 stag;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto out_free;

	mr->type = ERDMA_MR_TYPE_DMA;

	mr->ibmr.lkey = stag;
	mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc);
	ret = regmr_cmd(dev, mr);
	if (ret)
		goto out_remove_stag;

	return &mr->ibmr;

out_remove_stag:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

out_free:
	kfree(mr);

	return ERR_PTR(ret);
}

struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct erdma_mr *mr;
	struct erdma_dev *dev = to_edev(ibpd->device);
	int ret;
	u32 stag;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EOPNOTSUPP);

	if (max_num_sg > ERDMA_MR_MAX_MTT_CNT)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto out_free;

	mr->type = ERDMA_MR_TYPE_FRMR;

	mr->ibmr.lkey = stag;
	mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	/* update it in FRMR. */
	mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR |
		     ERDMA_MR_ACC_RW;

	mr->mem.page_size = PAGE_SIZE; /* update it later. */
	mr->mem.page_cnt = max_num_sg;
	mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true);
	if (IS_ERR(mr->mem.mtt)) {
		ret = PTR_ERR(mr->mem.mtt);
		goto out_remove_stag;
	}

	ret = regmr_cmd(dev, mr);
	if (ret)
		goto out_destroy_mtt;

	return &mr->ibmr;

out_destroy_mtt:
	erdma_destroy_mtt(dev, mr->mem.mtt);

out_remove_stag:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

out_free:
	kfree(mr);

	return ERR_PTR(ret);
}

static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct erdma_mr *mr = to_emr(ibmr);

	if (mr->mem.mtt_nents >= mr->mem.page_cnt)
		return -1;

	mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
	mr->mem.mtt_nents++;

	return 0;
}

int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		    unsigned int *sg_offset)
{
	struct erdma_mr *mr = to_emr(ibmr);
	int num;

	mr->mem.mtt_nents = 0;

	num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset,
			     erdma_set_page);

	return num;
}

struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				u64 virt, int access, struct ib_dmah *dmah,
				struct ib_udata *udata)
{
	struct erdma_mr *mr = NULL;
	struct erdma_dev *dev = to_edev(ibpd->device);
	u32 stag;
	int ret;

	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	if (!len || len > dev->attrs.max_mr_size)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
			      SZ_2G - SZ_4K, false);
	if (ret)
		goto err_out_free;

	ret = erdma_create_stag(dev, &stag);
	if (ret)
		goto err_out_put_mtt;

	mr->ibmr.lkey = mr->ibmr.rkey = stag;
	mr->ibmr.pd = ibpd;
	mr->mem.va = virt;
	mr->mem.len = len;
	mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access);
	mr->valid = 1;
	mr->type = ERDMA_MR_TYPE_NORMAL;

	ret = regmr_cmd(dev, mr);
	if (ret)
		goto err_out_mr;

	return &mr->ibmr;

err_out_mr:
	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
		       mr->ibmr.lkey >> 8);

err_out_put_mtt:
	put_mtt_entries(dev, &mr->mem);

err_out_free:
	kfree(mr);

	return ERR_PTR(ret);
}

int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct erdma_mr *mr;
	struct erdma_dev *dev = to_edev(ibmr->device);
	struct erdma_cmdq_dereg_mr_req req;
	int ret;

	mr = to_emr(ibmr);

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DEREG_MR);

	req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
		  FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);

	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				  true);
	if (ret)
		return ret;

	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8);

	put_mtt_entries(dev, &mr->mem);

	kfree(mr);
	return 0;
}

int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	struct erdma_dev *dev = to_edev(ibcq->device);
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	int err;
	struct erdma_cmdq_destroy_cq_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DESTROY_CQ);
	req.cqn = cq->cqn;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				  true);
	if (err)
		return err;

	if (rdma_is_kernel_res(&cq->ibcq.res)) {
		dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
		dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
			      cq->kern_cq.dbrec_dma);
	} else {
		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
	}

	xa_erase(&dev->cq_xa, cq->cqn);

	return 0;
}

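/*
 * Destroy a QP: force it into the error state, flush pending work, issue
 * DESTROY_QP, then wait for the last reference to drop before freeing the
 * queue buffers and doorbell records.
 */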
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	struct erdma_dev *dev = to_edev(ibqp->device);
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);
	struct erdma_cmdq_destroy_qp_req req;
	union erdma_mod_qp_params params;
	int err;

	down_write(&qp->state_lock);
	if (erdma_device_iwarp(dev)) {
		params.iwarp.state = ERDMA_QPS_IWARP_ERROR;
		erdma_modify_qp_state_iwarp(qp, &params.iwarp,
					    ERDMA_QPA_IWARP_STATE);
	} else {
		params.rocev2.state = ERDMA_QPS_ROCEV2_ERROR;
		erdma_modify_qp_state_rocev2(qp, &params.rocev2,
					     ERDMA_QPA_ROCEV2_STATE);
	}
	up_write(&qp->state_lock);

	cancel_delayed_work_sync(&qp->reflush_dwork);

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DESTROY_QP);
	req.qpn = QP_ID(qp);

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				  true);
	if (err)
		return err;

	erdma_qp_put(qp);
	wait_for_completion(&qp->safe_free);

	if (rdma_is_kernel_res(&qp->ibqp.res)) {
		free_kernel_qp(qp);
	} else {
		put_mtt_entries(dev, &qp->user_qp.sq_mem);
		put_mtt_entries(dev, &qp->user_qp.rq_mem);
		erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
	}

	if (qp->cep)
		erdma_cep_put(qp->cep);
	xa_erase(&dev->qp_xa, QP_ID(qp));

	return 0;
}

void erdma_qp_get_ref(struct ib_qp *ibqp)
{
	erdma_qp_get(to_eqp(ibqp));
}

void erdma_qp_put_ref(struct ib_qp *ibqp)
{
	erdma_qp_put(to_eqp(ibqp));
}

int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct erdma_user_mmap_entry *entry;
	pgprot_t prot;
	int err;

	rdma_entry = rdma_user_mmap_entry_get(ctx, vma);
	if (!rdma_entry)
		return -EINVAL;

	entry = to_emmap(rdma_entry);

	switch (entry->mmap_flag) {
	case ERDMA_MMAP_IO_NC:
		/* map doorbell. */
		prot = pgprot_device(vma->vm_page_prot);
		break;
	default:
		err = -EINVAL;
		goto put_entry;
	}

	err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
				prot, rdma_entry);

put_entry:
	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry);

	kfree(entry);
}

static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
			      bool ext_db_en)
{
	struct erdma_cmdq_ext_db_req req = {};
	u64 val0, val1;
	int ret;

	/*
	 * CAP_SYS_RAWIO is required if hardware does not support the extended
	 * doorbell mechanism.
	 */
	if (!ext_db_en && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!ext_db_en) {
		ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET;
		ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
		ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
		return 0;
	}

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_ALLOC_DB);

	req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
		  FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
		  FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);

	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1,
				  true);
	if (ret)
		return ret;

	ctx->ext_db.enable = true;
	ctx->ext_db.sdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_SDB);
	ctx->ext_db.rdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_RDB);
	ctx->ext_db.cdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_CDB);

	ctx->sdb = dev->func_bar_addr + (ctx->ext_db.sdb_off << PAGE_SHIFT);
	ctx->rdb = dev->func_bar_addr + (ctx->ext_db.rdb_off << PAGE_SHIFT);
	ctx->cdb = dev->func_bar_addr + (ctx->ext_db.cdb_off << PAGE_SHIFT);

	return 0;
}

static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
{
	struct erdma_cmdq_ext_db_req req = {};
	int ret;

	if (!ctx->ext_db.enable)
		return;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_FREE_DB);

	req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
		  FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
		  FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);

	req.sdb_off = ctx->ext_db.sdb_off;
	req.rdb_off = ctx->ext_db.rdb_off;
	req.cdb_off = ctx->ext_db.cdb_off;

	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				  true);
	if (ret)
		ibdev_err_ratelimited(&dev->ibdev,
				      "free db resources failed %d", ret);
}

static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
{
	rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry);
	rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry);
}

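/*
 * Set up a user context: reserve doorbell space (extended doorbells when the
 * device supports them, otherwise the shared BAR pages, which requires
 * CAP_SYS_RAWIO) and expose the doorbell pages to userspace as mmap entries.
 */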
int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
{
	struct erdma_ucontext *ctx = to_ectx(ibctx);
	struct erdma_dev *dev = to_edev(ibctx->device);
	int ret;
	struct erdma_uresp_alloc_ctx uresp = {};

	if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) {
		ret = -ENOMEM;
		goto err_out;
	}

	if (udata->outlen < sizeof(uresp)) {
		ret = -EINVAL;
		goto err_out;
	}

	INIT_LIST_HEAD(&ctx->dbrecords_page_list);
	mutex_init(&ctx->dbrecords_page_mutex);

	ret = alloc_db_resources(dev, ctx,
				 !!(dev->attrs.cap_flags &
				    ERDMA_DEV_CAP_FLAGS_EXTEND_DB));
	if (ret)
		goto err_out;

	ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
	if (!ctx->sq_db_mmap_entry) {
		ret = -ENOMEM;
		goto err_free_ext_db;
	}

	ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
	if (!ctx->rq_db_mmap_entry) {
		ret = -EINVAL;
		goto err_put_mmap_entries;
	}

	ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
		ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
	if (!ctx->cq_db_mmap_entry) {
		ret = -EINVAL;
		goto err_put_mmap_entries;
	}

	uresp.dev_id = dev->pdev->device;

	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (ret)
		goto err_put_mmap_entries;

	return 0;

err_put_mmap_entries:
	erdma_uctx_user_mmap_entries_remove(ctx);

err_free_ext_db:
	free_db_resources(dev, ctx);

err_out:
	atomic_dec(&dev->num_ctx);
	return ret;
}

void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct erdma_dev *dev = to_edev(ibctx->device);
	struct erdma_ucontext *ctx = to_ectx(ibctx);

	erdma_uctx_user_mmap_entries_remove(ctx);
	free_db_resources(dev, ctx);
	atomic_dec(&dev->num_ctx);
}

static void erdma_attr_to_av(const struct rdma_ah_attr *ah_attr,
			     struct erdma_av *av, u16 sport)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);

	av->port = rdma_ah_get_port_num(ah_attr);
	av->sgid_index = grh->sgid_index;
	av->hop_limit = grh->hop_limit;
	av->traffic_class = grh->traffic_class;
	av->sl = rdma_ah_get_sl(ah_attr);

	av->flow_label = grh->flow_label;
	av->udp_sport = sport;

	ether_addr_copy(av->dmac, ah_attr->roce.dmac);
	memcpy(av->dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE);

	if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid))
		av->ntype = ERDMA_NETWORK_TYPE_IPV4;
	else
		av->ntype = ERDMA_NETWORK_TYPE_IPV6;
}

static void erdma_av_to_attr(struct erdma_av *av, struct rdma_ah_attr *ah_attr)
{
	ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;

	rdma_ah_set_sl(ah_attr, av->sl);
	rdma_ah_set_port_num(ah_attr, av->port);
	rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH);

	rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index,
			av->hop_limit, av->traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, av->dgid);
}

static int ib_qps_to_erdma_qps[ERDMA_PROTO_COUNT][IB_QPS_ERR + 1] = {
	[ERDMA_PROTO_IWARP] = {
		[IB_QPS_RESET] = ERDMA_QPS_IWARP_IDLE,
		[IB_QPS_INIT] = ERDMA_QPS_IWARP_IDLE,
		[IB_QPS_RTR] = ERDMA_QPS_IWARP_RTR,
		[IB_QPS_RTS] = ERDMA_QPS_IWARP_RTS,
		[IB_QPS_SQD] = ERDMA_QPS_IWARP_CLOSING,
		[IB_QPS_SQE] = ERDMA_QPS_IWARP_TERMINATE,
		[IB_QPS_ERR] = ERDMA_QPS_IWARP_ERROR,
	},
	[ERDMA_PROTO_ROCEV2] = {
		[IB_QPS_RESET] = ERDMA_QPS_ROCEV2_RESET,
		[IB_QPS_INIT] = ERDMA_QPS_ROCEV2_INIT,
		[IB_QPS_RTR] = ERDMA_QPS_ROCEV2_RTR,
		[IB_QPS_RTS] = ERDMA_QPS_ROCEV2_RTS,
		[IB_QPS_SQD] = ERDMA_QPS_ROCEV2_SQD,
		[IB_QPS_SQE] = ERDMA_QPS_ROCEV2_SQE,
		[IB_QPS_ERR] = ERDMA_QPS_ROCEV2_ERROR,
	},
};

static int erdma_qps_to_ib_qps[ERDMA_PROTO_COUNT][ERDMA_QPS_ROCEV2_COUNT] = {
	[ERDMA_PROTO_IWARP] = {
		[ERDMA_QPS_IWARP_IDLE] = IB_QPS_INIT,
		[ERDMA_QPS_IWARP_RTR] = IB_QPS_RTR,
		[ERDMA_QPS_IWARP_RTS] = IB_QPS_RTS,
		[ERDMA_QPS_IWARP_CLOSING] = IB_QPS_ERR,
		[ERDMA_QPS_IWARP_TERMINATE] = IB_QPS_ERR,
		[ERDMA_QPS_IWARP_ERROR] = IB_QPS_ERR,
	},
	[ERDMA_PROTO_ROCEV2] = {
		[ERDMA_QPS_ROCEV2_RESET] = IB_QPS_RESET,
		[ERDMA_QPS_ROCEV2_INIT] = IB_QPS_INIT,
		[ERDMA_QPS_ROCEV2_RTR] = IB_QPS_RTR,
		[ERDMA_QPS_ROCEV2_RTS] = IB_QPS_RTS,
		[ERDMA_QPS_ROCEV2_SQD] = IB_QPS_SQD,
		[ERDMA_QPS_ROCEV2_SQE] = IB_QPS_SQE,
		[ERDMA_QPS_ROCEV2_ERROR] = IB_QPS_ERR,
	},
};

static inline enum erdma_qps_iwarp ib_to_iwarp_qps(enum ib_qp_state state)
{
	return ib_qps_to_erdma_qps[ERDMA_PROTO_IWARP][state];
}

static inline enum erdma_qps_rocev2 ib_to_rocev2_qps(enum ib_qp_state state)
{
	return ib_qps_to_erdma_qps[ERDMA_PROTO_ROCEV2][state];
}

static inline enum ib_qp_state iwarp_to_ib_qps(enum erdma_qps_iwarp state)
{
	return erdma_qps_to_ib_qps[ERDMA_PROTO_IWARP][state];
}

static inline enum ib_qp_state rocev2_to_ib_qps(enum erdma_qps_rocev2 state)
{
	return erdma_qps_to_ib_qps[ERDMA_PROTO_ROCEV2][state];
}

static int erdma_check_qp_attrs(struct erdma_qp *qp, struct ib_qp_attr *attr,
				int attr_mask)
{
	enum ib_qp_state cur_state, nxt_state;
	struct erdma_dev *dev = qp->dev;
	int ret = -EINVAL;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    !rdma_is_port_valid(&dev->ibdev, attr->port_num))
		goto out;

	if (erdma_device_rocev2(dev)) {
		cur_state = (attr_mask & IB_QP_CUR_STATE) ?
				    attr->cur_qp_state :
				    rocev2_to_ib_qps(qp->attrs.rocev2.state);

		nxt_state = (attr_mask & IB_QP_STATE) ? attr->qp_state :
							cur_state;

		if (!ib_modify_qp_is_ok(cur_state, nxt_state, qp->ibqp.qp_type,
					attr_mask))
			goto out;

		if ((attr_mask & IB_QP_AV) &&
		    erdma_check_gid_attr(
			    rdma_ah_read_grh(&attr->ah_attr)->sgid_attr))
			goto out;

		if ((attr_mask & IB_QP_PKEY_INDEX) &&
		    attr->pkey_index >= ERDMA_MAX_PKEYS)
			goto out;
	}

	return 0;

out:
	return ret;
}

static void erdma_init_mod_qp_params_rocev2(
	struct erdma_qp *qp, struct erdma_mod_qp_params_rocev2 *params,
	int *erdma_attr_mask, struct ib_qp_attr *attr, int ib_attr_mask)
{
	enum erdma_qpa_mask_rocev2 to_modify_attrs = 0;
	enum erdma_qps_rocev2 cur_state, nxt_state;
	u16 udp_sport;

	if (ib_attr_mask & IB_QP_CUR_STATE)
		cur_state = ib_to_rocev2_qps(attr->cur_qp_state);
	else
		cur_state = qp->attrs.rocev2.state;

	if (ib_attr_mask & IB_QP_STATE)
		nxt_state = ib_to_rocev2_qps(attr->qp_state);
	else
		nxt_state = cur_state;

	to_modify_attrs |= ERDMA_QPA_ROCEV2_STATE;
	params->state = nxt_state;

	if (ib_attr_mask & IB_QP_QKEY) {
		to_modify_attrs |= ERDMA_QPA_ROCEV2_QKEY;
		params->qkey = attr->qkey;
	}

	if (ib_attr_mask & IB_QP_SQ_PSN) {
		to_modify_attrs |= ERDMA_QPA_ROCEV2_SQ_PSN;
		params->sq_psn = attr->sq_psn;
	}

	if (ib_attr_mask & IB_QP_RQ_PSN) {
		to_modify_attrs |= ERDMA_QPA_ROCEV2_RQ_PSN;
		params->rq_psn = attr->rq_psn;
	}

	if (ib_attr_mask & IB_QP_DEST_QPN) {
		to_modify_attrs |= ERDMA_QPA_ROCEV2_DST_QPN;
		params->dst_qpn = attr->dest_qp_num;
	}

	if (ib_attr_mask & IB_QP_AV) {
		to_modify_attrs |= ERDMA_QPA_ROCEV2_AV;
		udp_sport = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
					       QP_ID(qp), params->dst_qpn);
		erdma_attr_to_av(&attr->ah_attr, &params->av, udp_sport);
	}

	*erdma_attr_mask = to_modify_attrs;
}

int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	union erdma_mod_qp_params params;
	int ret = 0, erdma_attr_mask = 0;

	down_write(&qp->state_lock);

	ret = erdma_check_qp_attrs(qp, attr, attr_mask);
	if (ret)
		goto out;

	if (erdma_device_iwarp(qp->dev)) {
		if (attr_mask & IB_QP_STATE) {
			erdma_attr_mask |= ERDMA_QPA_IWARP_STATE;
			params.iwarp.state = ib_to_iwarp_qps(attr->qp_state);
		}

		ret = erdma_modify_qp_state_iwarp(qp, &params.iwarp,
						  erdma_attr_mask);
	} else {
		erdma_init_mod_qp_params_rocev2(
			qp, &params.rocev2, &erdma_attr_mask, attr, attr_mask);

		ret = erdma_modify_qp_state_rocev2(qp, &params.rocev2,
						   erdma_attr_mask);
	}

out:
	up_write(&qp->state_lock);
	return ret;
}

static enum ib_qp_state query_qp_state(struct erdma_qp *qp)
{
	if (erdma_device_iwarp(qp->dev))
		return iwarp_to_ib_qps(qp->attrs.iwarp.state);
	else
		return rocev2_to_ib_qps(qp->attrs.rocev2.state);
}

int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		   int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct erdma_cmdq_query_qp_req_rocev2 req;
	struct erdma_dev *dev;
	struct erdma_qp *qp;
	u64 resp0, resp1;
	int ret;

	if (ibqp && qp_attr && qp_init_attr) {
		qp = to_eqp(ibqp);
		dev = to_edev(ibqp->device);
	} else {
		return -EINVAL;
	}

	qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE;
	qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE;

	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_send_sge = qp->attrs.max_send_sge;
	qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge;

	qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->cap = qp_attr->cap;

	if (erdma_device_rocev2(dev)) {
		/* Query hardware to get some attributes */
		erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
					CMDQ_OPCODE_QUERY_QP);
		req.qpn = QP_ID(qp);

		ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
					  &resp1, true);
		if (ret)
			return ret;

		qp_attr->sq_psn =
			FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK, resp0);
		qp_attr->rq_psn =
			FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK, resp0);
		qp_attr->qp_state = rocev2_to_ib_qps(FIELD_GET(
			ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK, resp0));
		qp_attr->cur_qp_state = qp_attr->qp_state;
		qp_attr->sq_draining = FIELD_GET(
			ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK, resp0);

		qp_attr->pkey_index = 0;
		qp_attr->dest_qp_num = qp->attrs.rocev2.dst_qpn;

		if (qp->ibqp.qp_type == IB_QPT_RC)
			erdma_av_to_attr(&qp->attrs.rocev2.av,
					 &qp_attr->ah_attr);
	} else {
		qp_attr->qp_state = query_qp_state(qp);
		qp_attr->cur_qp_state = qp_attr->qp_state;
	}

	return 0;
}

static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
			      struct erdma_ureq_create_cq *ureq)
{
	int ret;
	struct erdma_dev *dev = to_edev(cq->ibcq.device);

	ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
			      ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
			      true);
	if (ret)
		return ret;

	ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
				       &cq->user_cq.user_dbr_page,
				       &cq->user_cq.dbrec_dma);
	if (ret)
		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);

	return ret;
}

static int erdma_init_kernel_cq(struct erdma_cq *cq)
{
	struct erdma_dev *dev = to_edev(cq->ibcq.device);

	cq->kern_cq.qbuf =
		dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
				   &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
	if (!cq->kern_cq.qbuf)
		return -ENOMEM;

	cq->kern_cq.dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
					    &cq->kern_cq.dbrec_dma);
	if (!cq->kern_cq.dbrec)
		goto err_out;

	spin_lock_init(&cq->kern_cq.lock);
	/* use default cqdb addr */
	cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;

	return 0;

err_out:
	dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
			  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);

	return -ENOMEM;
}

int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		    struct uverbs_attr_bundle *attrs)
{
	struct ib_udata *udata = &attrs->driver_udata;
	struct erdma_cq *cq = to_ecq(ibcq);
	struct erdma_dev *dev = to_edev(ibcq->device);
	unsigned int depth = attr->cqe;
	int ret;
	struct erdma_ucontext *ctx = rdma_udata_to_drv_context(
		udata, struct erdma_ucontext, ibucontext);

	if (depth > dev->attrs.max_cqe)
		return -EINVAL;

	depth = roundup_pow_of_two(depth);
	cq->ibcq.cqe = depth;
	cq->depth = depth;
	cq->assoc_eqn = attr->comp_vector + 1;

	ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
			      XA_LIMIT(1, dev->attrs.max_cq - 1),
			      &dev->next_alloc_cqn, GFP_KERNEL);
	if (ret < 0)
		return ret;

	if (!rdma_is_kernel_res(&ibcq->res)) {
		struct erdma_ureq_create_cq ureq;
		struct erdma_uresp_create_cq uresp;

		ret = ib_copy_from_udata(&ureq, udata,
					 min(udata->inlen, sizeof(ureq)));
		if (ret)
			goto err_out_xa;

		ret = erdma_init_user_cq(ctx, cq, &ureq);
		if (ret)
			goto err_out_xa;

		uresp.cq_id = cq->cqn;
		uresp.num_cqe = depth;

		ret = ib_copy_to_udata(udata, &uresp,
				       min(sizeof(uresp), udata->outlen));
		if (ret)
			goto err_free_res;
	} else {
		ret = erdma_init_kernel_cq(cq);
		if (ret)
			goto err_out_xa;
	}

	ret = create_cq_cmd(ctx, cq);
	if (ret)
		goto err_free_res;

	return 0;

err_free_res:
	if (!rdma_is_kernel_res(&ibcq->res)) {
		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
	} else {
		dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
		dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
			      cq->kern_cq.dbrec_dma);
	}

err_out_xa:
	xa_erase(&dev->cq_xa, cq->cqn);

	return ret;
}

void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
{
	struct erdma_cmdq_config_mtu_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CONF_MTU);
	req.mtu = mtu;

	erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, true);
}

void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
{
	struct ib_event event;

	event.device = &dev->ibdev;
	event.element.port_num = 1;
	event.event = reason;

	ib_dispatch_event(&event);
}

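/*
 * Hardware counter indices; they follow the counter layout of the firmware
 * GET_STATS response, which erdma_query_hw_stats() copies verbatim into the
 * stats array.
 */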
enum counters {
        ERDMA_STATS_TX_REQS_CNT,
        ERDMA_STATS_TX_PACKETS_CNT,
        ERDMA_STATS_TX_BYTES_CNT,
        ERDMA_STATS_TX_DISABLE_DROP_CNT,
        ERDMA_STATS_TX_BPS_METER_DROP_CNT,
        ERDMA_STATS_TX_PPS_METER_DROP_CNT,

        ERDMA_STATS_RX_PACKETS_CNT,
        ERDMA_STATS_RX_BYTES_CNT,
        ERDMA_STATS_RX_DISABLE_DROP_CNT,
        ERDMA_STATS_RX_BPS_METER_DROP_CNT,
        ERDMA_STATS_RX_PPS_METER_DROP_CNT,

        ERDMA_STATS_MAX
};

static const struct rdma_stat_desc erdma_descs[] = {
        [ERDMA_STATS_TX_REQS_CNT].name = "tx_reqs_cnt",
        [ERDMA_STATS_TX_PACKETS_CNT].name = "tx_packets_cnt",
        [ERDMA_STATS_TX_BYTES_CNT].name = "tx_bytes_cnt",
        [ERDMA_STATS_TX_DISABLE_DROP_CNT].name = "tx_disable_drop_cnt",
        [ERDMA_STATS_TX_BPS_METER_DROP_CNT].name = "tx_bps_limit_drop_cnt",
        [ERDMA_STATS_TX_PPS_METER_DROP_CNT].name = "tx_pps_limit_drop_cnt",
        [ERDMA_STATS_RX_PACKETS_CNT].name = "rx_packets_cnt",
        [ERDMA_STATS_RX_BYTES_CNT].name = "rx_bytes_cnt",
        [ERDMA_STATS_RX_DISABLE_DROP_CNT].name = "rx_disable_drop_cnt",
        [ERDMA_STATS_RX_BPS_METER_DROP_CNT].name = "rx_bps_limit_drop_cnt",
        [ERDMA_STATS_RX_PPS_METER_DROP_CNT].name = "rx_pps_limit_drop_cnt",
};

struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
                                                u32 port_num)
{
        return rdma_alloc_hw_stats_struct(erdma_descs, ERDMA_STATS_MAX,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int erdma_query_hw_stats(struct erdma_dev *dev,
                                struct rdma_hw_stats *stats)
{
        struct erdma_cmdq_query_stats_resp *resp;
        struct erdma_cmdq_query_req req;
        dma_addr_t dma_addr;
        int err;

        erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
                                CMDQ_OPCODE_GET_STATS);

        resp = dma_pool_zalloc(dev->resp_pool, GFP_KERNEL, &dma_addr);
        if (!resp)
                return -ENOMEM;

        req.target_addr = dma_addr;
        req.target_length = ERDMA_HW_RESP_SIZE;

        err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
                                  true);
        if (err)
                goto out;

        if (resp->hdr.magic != ERDMA_HW_RESP_MAGIC) {
                err = -EINVAL;
                goto out;
        }

        memcpy(&stats->value[0], &resp->tx_req_cnt,
               sizeof(u64) * stats->num_counters);

out:
        dma_pool_free(dev->resp_pool, resp, dma_addr);

        return err;
}

int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
                       u32 port, int index)
{
        struct erdma_dev *dev = to_edev(ibdev);
        int ret;

        if (port == 0)
                return 0;

        ret = erdma_query_hw_stats(dev, stats);
        if (ret)
                return ret;

        return stats->num_counters;
}

enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev, u32 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}
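/*
 * Program or clear one GID table entry in hardware. For an add operation,
 * the network type (IPv4-mapped vs. IPv6) is derived from the GID itself.
 */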
static int erdma_set_gid(struct erdma_dev *dev, u8 op, u32 idx,
                         const union ib_gid *gid)
{
        struct erdma_cmdq_set_gid_req req;
        u8 ntype;

        req.cfg = FIELD_PREP(ERDMA_CMD_SET_GID_SGID_IDX_MASK, idx) |
                  FIELD_PREP(ERDMA_CMD_SET_GID_OP_MASK, op);

        if (op == ERDMA_SET_GID_OP_ADD) {
                if (ipv6_addr_v4mapped((struct in6_addr *)gid))
                        ntype = ERDMA_NETWORK_TYPE_IPV4;
                else
                        ntype = ERDMA_NETWORK_TYPE_IPV6;

                req.cfg |= FIELD_PREP(ERDMA_CMD_SET_GID_NTYPE_MASK, ntype);

                memcpy(&req.gid, gid, ERDMA_ROCEV2_GID_SIZE);
        }

        erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
                                CMDQ_OPCODE_SET_GID);
        return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
                                   true);
}

int erdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
        struct erdma_dev *dev = to_edev(attr->device);
        int ret;

        ret = erdma_check_gid_attr(attr);
        if (ret)
                return ret;

        return erdma_set_gid(dev, ERDMA_SET_GID_OP_ADD, attr->index,
                             &attr->gid);
}

int erdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
        return erdma_set_gid(to_edev(attr->device), ERDMA_SET_GID_OP_DEL,
                             attr->index, NULL);
}

int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
        if (index >= ERDMA_MAX_PKEYS)
                return -EINVAL;

        *pkey = ERDMA_DEFAULT_PKEY;
        return 0;
}

void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av)
{
        av_cfg->cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, av->flow_label) |
                       FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, av->ntype);

        av_cfg->traffic_class = av->traffic_class;
        av_cfg->hop_limit = av->hop_limit;
        av_cfg->sl = av->sl;

        av_cfg->udp_sport = av->udp_sport;
        av_cfg->sgid_index = av->sgid_index;

        ether_addr_copy(av_cfg->dmac, av->dmac);
        memcpy(av_cfg->dgid, av->dgid, ERDMA_ROCEV2_GID_SIZE);
}

int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
                    struct ib_udata *udata)
{
        const struct ib_global_route *grh =
                rdma_ah_read_grh(init_attr->ah_attr);
        struct erdma_dev *dev = to_edev(ibah->device);
        struct erdma_pd *pd = to_epd(ibah->pd);
        struct erdma_ah *ah = to_eah(ibah);
        struct erdma_cmdq_create_ah_req req;
        u32 udp_sport;
        int ret;

        ret = erdma_check_gid_attr(grh->sgid_attr);
        if (ret)
                return ret;

        ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
        if (ret < 0)
                return ret;

        ah->ahn = ret;

        if (grh->flow_label)
                udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
        else
                udp_sport =
                        IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);

        erdma_attr_to_av(init_attr->ah_attr, &ah->av, udp_sport);

        erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
                                CMDQ_OPCODE_CREATE_AH);

        req.pdn = pd->pdn;
        req.ahn = ah->ahn;
        erdma_set_av_cfg(&req.av_cfg, &ah->av);

        ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
                                  init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
        if (ret) {
                erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
                return ret;
        }

        return 0;
}

int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
{
        struct erdma_dev *dev = to_edev(ibah->device);
        struct erdma_pd *pd = to_epd(ibah->pd);
        struct erdma_ah *ah = to_eah(ibah);
        struct erdma_cmdq_destroy_ah_req req;
        int ret;

        erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
                                CMDQ_OPCODE_DESTROY_AH);

        req.pdn = pd->pdn;
        req.ahn = ah->ahn;

        ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
                                  flags & RDMA_DESTROY_AH_SLEEPABLE);
        if (ret)
                return ret;

        erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);

        return 0;
}
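/* Rebuild the rdma_ah_attr from the address vector cached in the AH. */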
int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
        struct erdma_ah *ah = to_eah(ibah);

        memset(ah_attr, 0, sizeof(*ah_attr));
        erdma_av_to_attr(&ah->av, ah_attr);

        return 0;
}