1 // SPDX-License-Identifier: GPL-2.0 2 3 /* Authors: Cheng Xu <chengyou@linux.alibaba.com> */ 4 /* Kai Shen <kaishen@linux.alibaba.com> */ 5 /* Copyright (c) 2020-2022, Alibaba Group. */ 6 7 /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ 8 /* Copyright (c) 2008-2019, IBM Corporation */ 9 10 /* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */ 11 12 #include <linux/vmalloc.h> 13 #include <net/addrconf.h> 14 #include <rdma/erdma-abi.h> 15 #include <rdma/ib_umem.h> 16 #include <rdma/uverbs_ioctl.h> 17 18 #include "erdma.h" 19 #include "erdma_cm.h" 20 #include "erdma_verbs.h" 21 22 static void assemble_qbuf_mtt_for_cmd(struct erdma_mem *mem, u32 *cfg, 23 u64 *addr0, u64 *addr1) 24 { 25 struct erdma_mtt *mtt = mem->mtt; 26 27 if (mem->mtt_nents > ERDMA_MAX_INLINE_MTT_ENTRIES) { 28 *addr0 = mtt->buf_dma; 29 *cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK, 30 ERDMA_MR_MTT_1LEVEL); 31 } else { 32 *addr0 = mtt->buf[0]; 33 memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1)); 34 *cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK, 35 ERDMA_MR_MTT_0LEVEL); 36 } 37 } 38 39 static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp) 40 { 41 struct erdma_dev *dev = to_edev(qp->ibqp.device); 42 struct erdma_pd *pd = to_epd(qp->ibqp.pd); 43 struct erdma_cmdq_create_qp_req req; 44 struct erdma_uqp *user_qp; 45 u64 resp0, resp1; 46 int err; 47 48 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, 49 CMDQ_OPCODE_CREATE_QP); 50 51 req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK, 52 ilog2(qp->attrs.sq_size)) | 53 FIELD_PREP(ERDMA_CMD_CREATE_QP_QPN_MASK, QP_ID(qp)); 54 req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK, 55 ilog2(qp->attrs.rq_size)) | 56 FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn); 57 58 if (qp->ibqp.qp_type == IB_QPT_RC) 59 req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK, 60 ERDMA_QPT_RC); 61 else 62 req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_QP_TYPE_MASK, 63 ERDMA_QPT_UD); 64 65 if (rdma_is_kernel_res(&qp->ibqp.res)) { 66 u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT; 67 68 req.sq_cqn_mtt_cfg = 69 FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK, 70 pgsz_range) | 71 FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn); 72 req.rq_cqn_mtt_cfg = 73 FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK, 74 pgsz_range) | 75 FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn); 76 77 req.sq_mtt_cfg = 78 FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) | 79 FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) | 80 FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK, 81 ERDMA_MR_MTT_0LEVEL); 82 req.rq_mtt_cfg = req.sq_mtt_cfg; 83 84 req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr; 85 req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr; 86 req.sq_dbrec_dma = qp->kern_qp.sq_dbrec_dma; 87 req.rq_dbrec_dma = qp->kern_qp.rq_dbrec_dma; 88 } else { 89 user_qp = &qp->user_qp; 90 req.sq_cqn_mtt_cfg = FIELD_PREP( 91 ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK, 92 ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT); 93 req.sq_cqn_mtt_cfg |= 94 FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn); 95 96 req.rq_cqn_mtt_cfg = FIELD_PREP( 97 ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK, 98 ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT); 99 req.rq_cqn_mtt_cfg |= 100 FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn); 101 102 req.sq_mtt_cfg = user_qp->sq_mem.page_offset; 103 req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 104 user_qp->sq_mem.mtt_nents); 105 106 req.rq_mtt_cfg = user_qp->rq_mem.page_offset; 107 req.rq_mtt_cfg |= 
FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 108 user_qp->rq_mem.mtt_nents); 109 110 assemble_qbuf_mtt_for_cmd(&user_qp->sq_mem, &req.sq_mtt_cfg, 111 &req.sq_buf_addr, req.sq_mtt_entry); 112 assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg, 113 &req.rq_buf_addr, req.rq_mtt_entry); 114 115 req.sq_dbrec_dma = user_qp->sq_dbrec_dma; 116 req.rq_dbrec_dma = user_qp->rq_dbrec_dma; 117 118 if (uctx->ext_db.enable) { 119 req.sq_cqn_mtt_cfg |= 120 FIELD_PREP(ERDMA_CMD_CREATE_QP_DB_CFG_MASK, 1); 121 req.db_cfg = 122 FIELD_PREP(ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK, 123 uctx->ext_db.sdb_off) | 124 FIELD_PREP(ERDMA_CMD_CREATE_QP_RQDB_CFG_MASK, 125 uctx->ext_db.rdb_off); 126 } 127 } 128 129 err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1, 130 true); 131 if (!err && erdma_device_iwarp(dev)) 132 qp->attrs.iwarp.cookie = 133 FIELD_GET(ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK, resp0); 134 135 return err; 136 } 137 138 static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) 139 { 140 struct erdma_pd *pd = to_epd(mr->ibmr.pd); 141 u32 mtt_level = ERDMA_MR_MTT_0LEVEL; 142 struct erdma_cmdq_reg_mr_req req; 143 144 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR); 145 146 if (mr->type == ERDMA_MR_TYPE_FRMR || 147 mr->mem.page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES) { 148 if (mr->mem.mtt->continuous) { 149 req.phy_addr[0] = mr->mem.mtt->buf_dma; 150 mtt_level = ERDMA_MR_MTT_1LEVEL; 151 } else { 152 req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist); 153 mtt_level = mr->mem.mtt->level; 154 } 155 } else if (mr->type != ERDMA_MR_TYPE_DMA) { 156 memcpy(req.phy_addr, mr->mem.mtt->buf, 157 MTT_SIZE(mr->mem.page_cnt)); 158 } 159 160 req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) | 161 FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) | 162 FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8); 163 req.cfg1 = FIELD_PREP(ERDMA_CMD_REGMR_PD_MASK, pd->pdn) | 164 FIELD_PREP(ERDMA_CMD_REGMR_TYPE_MASK, mr->type) | 165 FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access); 166 req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK, 167 ilog2(mr->mem.page_size)) | 168 FIELD_PREP(ERDMA_CMD_REGMR_MTT_LEVEL_MASK, mtt_level) | 169 FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt); 170 171 if (mr->type == ERDMA_MR_TYPE_DMA) 172 goto post_cmd; 173 174 if (mr->type == ERDMA_MR_TYPE_NORMAL) { 175 req.start_va = mr->mem.va; 176 req.size = mr->mem.len; 177 } 178 179 if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) { 180 req.cfg0 |= FIELD_PREP(ERDMA_CMD_MR_VERSION_MASK, 1); 181 req.cfg2 |= FIELD_PREP(ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK, 182 PAGE_SHIFT - ERDMA_HW_PAGE_SHIFT); 183 req.size_h = upper_32_bits(mr->mem.len); 184 req.mtt_cnt_h = mr->mem.page_cnt >> 20; 185 } 186 187 post_cmd: 188 return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, 189 true); 190 } 191 192 static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq) 193 { 194 struct erdma_dev *dev = to_edev(cq->ibcq.device); 195 struct erdma_cmdq_create_cq_req req; 196 struct erdma_mem *mem; 197 u32 page_size; 198 199 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, 200 CMDQ_OPCODE_CREATE_CQ); 201 202 req.cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_CQN_MASK, cq->cqn) | 203 FIELD_PREP(ERDMA_CMD_CREATE_CQ_DEPTH_MASK, ilog2(cq->depth)); 204 req.cfg1 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_EQN_MASK, cq->assoc_eqn); 205 206 if (rdma_is_kernel_res(&cq->ibcq.res)) { 207 page_size = SZ_32M; 208 req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK, 209 ilog2(page_size) - 
ERDMA_HW_PAGE_SHIFT); 210 req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr); 211 req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr); 212 213 req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) | 214 FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK, 215 ERDMA_MR_MTT_0LEVEL); 216 217 req.first_page_offset = 0; 218 req.cq_dbrec_dma = cq->kern_cq.dbrec_dma; 219 } else { 220 mem = &cq->user_cq.qbuf_mem; 221 req.cfg0 |= 222 FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK, 223 ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT); 224 if (mem->mtt_nents == 1) { 225 req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]); 226 req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]); 227 req.cfg1 |= 228 FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK, 229 ERDMA_MR_MTT_0LEVEL); 230 } else { 231 req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma); 232 req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma); 233 req.cfg1 |= 234 FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK, 235 ERDMA_MR_MTT_1LEVEL); 236 } 237 req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 238 mem->mtt_nents); 239 240 req.first_page_offset = mem->page_offset; 241 req.cq_dbrec_dma = cq->user_cq.dbrec_dma; 242 243 if (uctx->ext_db.enable) { 244 req.cfg1 |= FIELD_PREP( 245 ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK, 1); 246 req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_DB_CFG_MASK, 247 uctx->ext_db.cdb_off); 248 } 249 } 250 251 return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, 252 true); 253 } 254 255 static int erdma_alloc_idx(struct erdma_resource_cb *res_cb) 256 { 257 int idx; 258 unsigned long flags; 259 260 spin_lock_irqsave(&res_cb->lock, flags); 261 idx = find_next_zero_bit(res_cb->bitmap, res_cb->max_cap, 262 res_cb->next_alloc_idx); 263 if (idx == res_cb->max_cap) { 264 idx = find_first_zero_bit(res_cb->bitmap, res_cb->max_cap); 265 if (idx == res_cb->max_cap) { 266 res_cb->next_alloc_idx = 1; 267 spin_unlock_irqrestore(&res_cb->lock, flags); 268 return -ENOSPC; 269 } 270 } 271 272 set_bit(idx, res_cb->bitmap); 273 res_cb->next_alloc_idx = idx + 1; 274 spin_unlock_irqrestore(&res_cb->lock, flags); 275 276 return idx; 277 } 278 279 static inline void erdma_free_idx(struct erdma_resource_cb *res_cb, u32 idx) 280 { 281 unsigned long flags; 282 u32 used; 283 284 spin_lock_irqsave(&res_cb->lock, flags); 285 used = __test_and_clear_bit(idx, res_cb->bitmap); 286 spin_unlock_irqrestore(&res_cb->lock, flags); 287 WARN_ON(!used); 288 } 289 290 static struct rdma_user_mmap_entry * 291 erdma_user_mmap_entry_insert(struct erdma_ucontext *uctx, void *address, 292 u32 size, u8 mmap_flag, u64 *mmap_offset) 293 { 294 struct erdma_user_mmap_entry *entry = 295 kzalloc(sizeof(*entry), GFP_KERNEL); 296 int ret; 297 298 if (!entry) 299 return NULL; 300 301 entry->address = (u64)address; 302 entry->mmap_flag = mmap_flag; 303 304 size = PAGE_ALIGN(size); 305 306 ret = rdma_user_mmap_entry_insert(&uctx->ibucontext, &entry->rdma_entry, 307 size); 308 if (ret) { 309 kfree(entry); 310 return NULL; 311 } 312 313 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry); 314 315 return &entry->rdma_entry; 316 } 317 318 int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, 319 struct ib_udata *unused) 320 { 321 struct erdma_dev *dev = to_edev(ibdev); 322 323 memset(attr, 0, sizeof(*attr)); 324 325 attr->max_mr_size = dev->attrs.max_mr_size; 326 attr->vendor_id = PCI_VENDOR_ID_ALIBABA; 327 attr->vendor_part_id = dev->pdev->device; 328 attr->hw_ver = dev->pdev->revision; 329 attr->max_qp = dev->attrs.max_qp - 1; 330 
attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr); 331 attr->max_qp_rd_atom = dev->attrs.max_ord; 332 attr->max_qp_init_rd_atom = dev->attrs.max_ird; 333 attr->max_res_rd_atom = dev->attrs.max_qp * dev->attrs.max_ird; 334 attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS; 335 attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; 336 ibdev->local_dma_lkey = dev->attrs.local_dma_key; 337 attr->max_send_sge = dev->attrs.max_send_sge; 338 attr->max_recv_sge = dev->attrs.max_recv_sge; 339 attr->max_sge_rd = dev->attrs.max_sge_rd; 340 attr->max_cq = dev->attrs.max_cq - 1; 341 attr->max_cqe = dev->attrs.max_cqe; 342 attr->max_mr = dev->attrs.max_mr; 343 attr->max_pd = dev->attrs.max_pd; 344 attr->max_mw = dev->attrs.max_mw; 345 attr->max_fast_reg_page_list_len = ERDMA_MAX_FRMR_PA; 346 attr->page_size_cap = ERDMA_PAGE_SIZE_SUPPORT; 347 348 if (erdma_device_rocev2(dev)) { 349 attr->max_pkeys = ERDMA_MAX_PKEYS; 350 attr->max_ah = dev->attrs.max_ah; 351 } 352 353 if (dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_ATOMIC) 354 attr->atomic_cap = IB_ATOMIC_GLOB; 355 356 attr->fw_ver = dev->attrs.fw_version; 357 358 if (dev->netdev) 359 addrconf_addr_eui48((u8 *)&attr->sys_image_guid, 360 dev->netdev->dev_addr); 361 362 return 0; 363 } 364 365 int erdma_query_gid(struct ib_device *ibdev, u32 port, int idx, 366 union ib_gid *gid) 367 { 368 struct erdma_dev *dev = to_edev(ibdev); 369 370 memset(gid, 0, sizeof(*gid)); 371 ether_addr_copy(gid->raw, dev->attrs.peer_addr); 372 373 return 0; 374 } 375 376 int erdma_query_port(struct ib_device *ibdev, u32 port, 377 struct ib_port_attr *attr) 378 { 379 struct erdma_dev *dev = to_edev(ibdev); 380 struct net_device *ndev = dev->netdev; 381 382 memset(attr, 0, sizeof(*attr)); 383 384 if (erdma_device_iwarp(dev)) { 385 attr->gid_tbl_len = 1; 386 } else { 387 attr->gid_tbl_len = dev->attrs.max_gid; 388 attr->ip_gids = true; 389 attr->pkey_tbl_len = ERDMA_MAX_PKEYS; 390 } 391 392 attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP; 393 attr->max_msg_sz = -1; 394 395 if (!ndev) 396 goto out; 397 398 ib_get_eth_speed(ibdev, port, &attr->active_speed, &attr->active_width); 399 attr->max_mtu = ib_mtu_int_to_enum(ndev->mtu); 400 attr->active_mtu = ib_mtu_int_to_enum(ndev->mtu); 401 attr->state = ib_get_curr_port_state(ndev); 402 403 out: 404 if (attr->state == IB_PORT_ACTIVE) 405 attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP; 406 else 407 attr->phys_state = IB_PORT_PHYS_STATE_DISABLED; 408 409 return 0; 410 } 411 412 int erdma_get_port_immutable(struct ib_device *ibdev, u32 port, 413 struct ib_port_immutable *port_immutable) 414 { 415 struct erdma_dev *dev = to_edev(ibdev); 416 417 if (erdma_device_iwarp(dev)) { 418 port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; 419 port_immutable->gid_tbl_len = 1; 420 } else { 421 port_immutable->core_cap_flags = 422 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 423 port_immutable->max_mad_size = IB_MGMT_MAD_SIZE; 424 port_immutable->gid_tbl_len = dev->attrs.max_gid; 425 port_immutable->pkey_tbl_len = ERDMA_MAX_PKEYS; 426 } 427 428 return 0; 429 } 430 431 int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 432 { 433 struct erdma_pd *pd = to_epd(ibpd); 434 struct erdma_dev *dev = to_edev(ibpd->device); 435 int pdn; 436 437 pdn = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_PD]); 438 if (pdn < 0) 439 return pdn; 440 441 pd->pdn = pdn; 442 443 return 0; 444 } 445 446 int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) 447 { 448 struct erdma_pd *pd = to_epd(ibpd); 449 struct erdma_dev *dev = 
to_edev(ibpd->device); 450 451 erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_PD], pd->pdn); 452 453 return 0; 454 } 455 456 static void erdma_flush_worker(struct work_struct *work) 457 { 458 struct delayed_work *dwork = to_delayed_work(work); 459 struct erdma_qp *qp = 460 container_of(dwork, struct erdma_qp, reflush_dwork); 461 struct erdma_cmdq_reflush_req req; 462 463 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, 464 CMDQ_OPCODE_REFLUSH); 465 req.qpn = QP_ID(qp); 466 req.sq_pi = qp->kern_qp.sq_pi; 467 req.rq_pi = qp->kern_qp.rq_pi; 468 erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL, 469 true); 470 } 471 472 static int erdma_qp_validate_cap(struct erdma_dev *dev, 473 struct ib_qp_init_attr *attrs) 474 { 475 if ((attrs->cap.max_send_wr > dev->attrs.max_send_wr) || 476 (attrs->cap.max_recv_wr > dev->attrs.max_recv_wr) || 477 (attrs->cap.max_send_sge > dev->attrs.max_send_sge) || 478 (attrs->cap.max_recv_sge > dev->attrs.max_recv_sge) || 479 (attrs->cap.max_inline_data > ERDMA_MAX_INLINE) || 480 !attrs->cap.max_send_wr || !attrs->cap.max_recv_wr) { 481 return -EINVAL; 482 } 483 484 return 0; 485 } 486 487 static int erdma_qp_validate_attr(struct erdma_dev *dev, 488 struct ib_qp_init_attr *attrs) 489 { 490 if (erdma_device_iwarp(dev) && attrs->qp_type != IB_QPT_RC) 491 return -EOPNOTSUPP; 492 493 if (erdma_device_rocev2(dev) && attrs->qp_type != IB_QPT_RC && 494 attrs->qp_type != IB_QPT_UD && attrs->qp_type != IB_QPT_GSI) 495 return -EOPNOTSUPP; 496 497 if (attrs->srq) 498 return -EOPNOTSUPP; 499 500 if (!attrs->send_cq || !attrs->recv_cq) 501 return -EOPNOTSUPP; 502 503 return 0; 504 } 505 506 static void free_kernel_qp(struct erdma_qp *qp) 507 { 508 struct erdma_dev *dev = qp->dev; 509 510 vfree(qp->kern_qp.swr_tbl); 511 vfree(qp->kern_qp.rwr_tbl); 512 513 if (qp->kern_qp.sq_buf) 514 dma_free_coherent(&dev->pdev->dev, 515 qp->attrs.sq_size << SQEBB_SHIFT, 516 qp->kern_qp.sq_buf, 517 qp->kern_qp.sq_buf_dma_addr); 518 519 if (qp->kern_qp.sq_dbrec) 520 dma_pool_free(dev->db_pool, qp->kern_qp.sq_dbrec, 521 qp->kern_qp.sq_dbrec_dma); 522 523 if (qp->kern_qp.rq_buf) 524 dma_free_coherent(&dev->pdev->dev, 525 qp->attrs.rq_size << RQE_SHIFT, 526 qp->kern_qp.rq_buf, 527 qp->kern_qp.rq_buf_dma_addr); 528 529 if (qp->kern_qp.rq_dbrec) 530 dma_pool_free(dev->db_pool, qp->kern_qp.rq_dbrec, 531 qp->kern_qp.rq_dbrec_dma); 532 } 533 534 static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp, 535 struct ib_qp_init_attr *attrs) 536 { 537 struct erdma_kqp *kqp = &qp->kern_qp; 538 int size; 539 540 if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) 541 kqp->sig_all = 1; 542 543 kqp->sq_pi = 0; 544 kqp->sq_ci = 0; 545 kqp->rq_pi = 0; 546 kqp->rq_ci = 0; 547 kqp->hw_sq_db = 548 dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT); 549 kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET; 550 551 kqp->swr_tbl = vmalloc_array(qp->attrs.sq_size, sizeof(u64)); 552 kqp->rwr_tbl = vmalloc_array(qp->attrs.rq_size, sizeof(u64)); 553 if (!kqp->swr_tbl || !kqp->rwr_tbl) 554 goto err_out; 555 556 size = qp->attrs.sq_size << SQEBB_SHIFT; 557 kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size, 558 &kqp->sq_buf_dma_addr, GFP_KERNEL); 559 if (!kqp->sq_buf) 560 goto err_out; 561 562 kqp->sq_dbrec = 563 dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->sq_dbrec_dma); 564 if (!kqp->sq_dbrec) 565 goto err_out; 566 567 size = qp->attrs.rq_size << RQE_SHIFT; 568 kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size, 569 &kqp->rq_buf_dma_addr, GFP_KERNEL); 570 if (!kqp->rq_buf) 571 goto 
err_out; 572 573 kqp->rq_dbrec = 574 dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->rq_dbrec_dma); 575 if (!kqp->rq_dbrec) 576 goto err_out; 577 578 return 0; 579 580 err_out: 581 free_kernel_qp(qp); 582 return -ENOMEM; 583 } 584 585 static void erdma_fill_bottom_mtt(struct erdma_dev *dev, struct erdma_mem *mem) 586 { 587 struct erdma_mtt *mtt = mem->mtt; 588 struct ib_block_iter biter; 589 u32 idx = 0; 590 591 while (mtt->low_level) 592 mtt = mtt->low_level; 593 594 rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size) 595 mtt->buf[idx++] = rdma_block_iter_dma_address(&biter); 596 } 597 598 static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev, 599 size_t size) 600 { 601 struct erdma_mtt *mtt; 602 603 mtt = kzalloc(sizeof(*mtt), GFP_KERNEL); 604 if (!mtt) 605 return ERR_PTR(-ENOMEM); 606 607 mtt->size = size; 608 mtt->buf = kzalloc(mtt->size, GFP_KERNEL); 609 if (!mtt->buf) 610 goto err_free_mtt; 611 612 mtt->continuous = true; 613 mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size, 614 DMA_TO_DEVICE); 615 if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma)) 616 goto err_free_mtt_buf; 617 618 return mtt; 619 620 err_free_mtt_buf: 621 kfree(mtt->buf); 622 623 err_free_mtt: 624 kfree(mtt); 625 626 return ERR_PTR(-ENOMEM); 627 } 628 629 static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev, 630 struct erdma_mtt *mtt) 631 { 632 dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE); 633 vfree(mtt->sglist); 634 } 635 636 static void erdma_destroy_scatter_mtt(struct erdma_dev *dev, 637 struct erdma_mtt *mtt) 638 { 639 erdma_destroy_mtt_buf_sg(dev, mtt); 640 vfree(mtt->buf); 641 kfree(mtt); 642 } 643 644 static void erdma_init_middle_mtt(struct erdma_mtt *mtt, 645 struct erdma_mtt *low_mtt) 646 { 647 struct scatterlist *sg; 648 u32 idx = 0, i; 649 650 for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i) 651 mtt->buf[idx++] = sg_dma_address(sg); 652 } 653 654 static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt) 655 { 656 struct scatterlist *sglist; 657 void *buf = mtt->buf; 658 u32 npages, i, nsg; 659 struct page *pg; 660 661 /* Failed if buf is not page aligned */ 662 if ((uintptr_t)buf & ~PAGE_MASK) 663 return -EINVAL; 664 665 npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE); 666 sglist = vzalloc(npages * sizeof(*sglist)); 667 if (!sglist) 668 return -ENOMEM; 669 670 sg_init_table(sglist, npages); 671 for (i = 0; i < npages; i++) { 672 pg = vmalloc_to_page(buf); 673 if (!pg) 674 goto err; 675 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0); 676 buf += PAGE_SIZE; 677 } 678 679 nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE); 680 if (!nsg) 681 goto err; 682 683 mtt->sglist = sglist; 684 mtt->nsg = nsg; 685 686 return 0; 687 err: 688 vfree(sglist); 689 690 return -ENOMEM; 691 } 692 693 static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev, 694 size_t size) 695 { 696 struct erdma_mtt *mtt; 697 int ret = -ENOMEM; 698 699 mtt = kzalloc(sizeof(*mtt), GFP_KERNEL); 700 if (!mtt) 701 return ERR_PTR(-ENOMEM); 702 703 mtt->size = ALIGN(size, PAGE_SIZE); 704 mtt->buf = vzalloc(mtt->size); 705 mtt->continuous = false; 706 if (!mtt->buf) 707 goto err_free_mtt; 708 709 ret = erdma_create_mtt_buf_sg(dev, mtt); 710 if (ret) 711 goto err_free_mtt_buf; 712 713 ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n", 714 mtt->size, mtt->nsg); 715 716 return mtt; 717 718 err_free_mtt_buf: 719 vfree(mtt->buf); 720 721 err_free_mtt: 722 kfree(mtt); 723 724 return ERR_PTR(ret); 725 } 726 727 static 
struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size, 728 bool force_continuous) 729 { 730 struct erdma_mtt *mtt, *tmp_mtt; 731 int ret, level = 0; 732 733 ibdev_dbg(&dev->ibdev, "create_mtt, size:%lu, force cont:%d\n", size, 734 force_continuous); 735 736 if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_MTT_VA)) 737 force_continuous = true; 738 739 if (force_continuous) 740 return erdma_create_cont_mtt(dev, size); 741 742 mtt = erdma_create_scatter_mtt(dev, size); 743 if (IS_ERR(mtt)) 744 return mtt; 745 level = 1; 746 747 /* convergence the mtt table. */ 748 while (mtt->nsg != 1 && level <= 3) { 749 tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg)); 750 if (IS_ERR(tmp_mtt)) { 751 ret = PTR_ERR(tmp_mtt); 752 goto err_free_mtt; 753 } 754 erdma_init_middle_mtt(tmp_mtt, mtt); 755 tmp_mtt->low_level = mtt; 756 mtt = tmp_mtt; 757 level++; 758 } 759 760 if (level > 3) { 761 ret = -ENOMEM; 762 goto err_free_mtt; 763 } 764 765 mtt->level = level; 766 ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n", 767 mtt->level, mtt->sglist[0].dma_address); 768 769 return mtt; 770 err_free_mtt: 771 while (mtt) { 772 tmp_mtt = mtt->low_level; 773 erdma_destroy_scatter_mtt(dev, mtt); 774 mtt = tmp_mtt; 775 } 776 777 return ERR_PTR(ret); 778 } 779 780 static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt) 781 { 782 struct erdma_mtt *tmp_mtt; 783 784 if (mtt->continuous) { 785 dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size, 786 DMA_TO_DEVICE); 787 kfree(mtt->buf); 788 kfree(mtt); 789 } else { 790 while (mtt) { 791 tmp_mtt = mtt->low_level; 792 erdma_destroy_scatter_mtt(dev, mtt); 793 mtt = tmp_mtt; 794 } 795 } 796 } 797 798 static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem, 799 u64 start, u64 len, int access, u64 virt, 800 unsigned long req_page_size, bool force_continuous) 801 { 802 int ret = 0; 803 804 mem->umem = ib_umem_get(&dev->ibdev, start, len, access); 805 if (IS_ERR(mem->umem)) { 806 ret = PTR_ERR(mem->umem); 807 mem->umem = NULL; 808 return ret; 809 } 810 811 mem->va = virt; 812 mem->len = len; 813 mem->page_size = ib_umem_find_best_pgsz(mem->umem, req_page_size, virt); 814 mem->page_offset = start & (mem->page_size - 1); 815 mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size); 816 mem->page_cnt = mem->mtt_nents; 817 mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt), 818 force_continuous); 819 if (IS_ERR(mem->mtt)) { 820 ret = PTR_ERR(mem->mtt); 821 goto error_ret; 822 } 823 824 erdma_fill_bottom_mtt(dev, mem); 825 826 return 0; 827 828 error_ret: 829 if (mem->umem) { 830 ib_umem_release(mem->umem); 831 mem->umem = NULL; 832 } 833 834 return ret; 835 } 836 837 static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem) 838 { 839 if (mem->mtt) 840 erdma_destroy_mtt(dev, mem->mtt); 841 842 if (mem->umem) { 843 ib_umem_release(mem->umem); 844 mem->umem = NULL; 845 } 846 } 847 848 static int erdma_map_user_dbrecords(struct erdma_ucontext *ctx, 849 u64 dbrecords_va, 850 struct erdma_user_dbrecords_page **dbr_page, 851 dma_addr_t *dma_addr) 852 { 853 struct erdma_user_dbrecords_page *page = NULL; 854 int rv = 0; 855 856 mutex_lock(&ctx->dbrecords_page_mutex); 857 858 list_for_each_entry(page, &ctx->dbrecords_page_list, list) 859 if (page->va == (dbrecords_va & PAGE_MASK)) 860 goto found; 861 862 page = kmalloc(sizeof(*page), GFP_KERNEL); 863 if (!page) { 864 rv = -ENOMEM; 865 goto out; 866 } 867 868 page->va = (dbrecords_va & PAGE_MASK); 869 page->refcnt = 0; 870 871 page->umem = 
ib_umem_get(ctx->ibucontext.device, 872 dbrecords_va & PAGE_MASK, PAGE_SIZE, 0); 873 if (IS_ERR(page->umem)) { 874 rv = PTR_ERR(page->umem); 875 kfree(page); 876 goto out; 877 } 878 879 list_add(&page->list, &ctx->dbrecords_page_list); 880 881 found: 882 *dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) + 883 (dbrecords_va & ~PAGE_MASK); 884 *dbr_page = page; 885 page->refcnt++; 886 887 out: 888 mutex_unlock(&ctx->dbrecords_page_mutex); 889 return rv; 890 } 891 892 static void 893 erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx, 894 struct erdma_user_dbrecords_page **dbr_page) 895 { 896 if (!ctx || !(*dbr_page)) 897 return; 898 899 mutex_lock(&ctx->dbrecords_page_mutex); 900 if (--(*dbr_page)->refcnt == 0) { 901 list_del(&(*dbr_page)->list); 902 ib_umem_release((*dbr_page)->umem); 903 kfree(*dbr_page); 904 } 905 906 *dbr_page = NULL; 907 mutex_unlock(&ctx->dbrecords_page_mutex); 908 } 909 910 static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx, 911 u64 va, u32 len, u64 dbrec_va) 912 { 913 dma_addr_t dbrec_dma; 914 u32 rq_offset; 915 int ret; 916 917 if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) + 918 qp->attrs.rq_size * RQE_SIZE)) 919 return -EINVAL; 920 921 ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va, 922 qp->attrs.sq_size << SQEBB_SHIFT, 0, va, 923 (SZ_1M - SZ_4K), true); 924 if (ret) 925 return ret; 926 927 rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE); 928 qp->user_qp.rq_offset = rq_offset; 929 930 ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset, 931 qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset, 932 (SZ_1M - SZ_4K), true); 933 if (ret) 934 goto put_sq_mtt; 935 936 ret = erdma_map_user_dbrecords(uctx, dbrec_va, 937 &qp->user_qp.user_dbr_page, 938 &dbrec_dma); 939 if (ret) 940 goto put_rq_mtt; 941 942 qp->user_qp.sq_dbrec_dma = dbrec_dma; 943 qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE; 944 945 return 0; 946 947 put_rq_mtt: 948 put_mtt_entries(qp->dev, &qp->user_qp.rq_mem); 949 950 put_sq_mtt: 951 put_mtt_entries(qp->dev, &qp->user_qp.sq_mem); 952 953 return ret; 954 } 955 956 static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx) 957 { 958 put_mtt_entries(qp->dev, &qp->user_qp.sq_mem); 959 put_mtt_entries(qp->dev, &qp->user_qp.rq_mem); 960 erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page); 961 } 962 963 int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, 964 struct ib_udata *udata) 965 { 966 struct erdma_qp *qp = to_eqp(ibqp); 967 struct erdma_dev *dev = to_edev(ibqp->device); 968 struct erdma_ucontext *uctx = rdma_udata_to_drv_context( 969 udata, struct erdma_ucontext, ibucontext); 970 struct erdma_ureq_create_qp ureq; 971 struct erdma_uresp_create_qp uresp; 972 void *old_entry; 973 int ret = 0; 974 975 ret = erdma_qp_validate_cap(dev, attrs); 976 if (ret) 977 goto err_out; 978 979 ret = erdma_qp_validate_attr(dev, attrs); 980 if (ret) 981 goto err_out; 982 983 qp->scq = to_ecq(attrs->send_cq); 984 qp->rcq = to_ecq(attrs->recv_cq); 985 qp->dev = dev; 986 qp->attrs.cc = dev->attrs.cc; 987 988 init_rwsem(&qp->state_lock); 989 kref_init(&qp->ref); 990 init_completion(&qp->safe_free); 991 992 if (qp->ibqp.qp_type == IB_QPT_GSI) { 993 old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL); 994 if (xa_is_err(old_entry)) 995 ret = xa_err(old_entry); 996 } else { 997 ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp, 998 XA_LIMIT(1, dev->attrs.max_qp - 1), 999 &dev->next_alloc_qpn, GFP_KERNEL); 1000 } 1001 1002 if 
(ret < 0) { 1003 ret = -ENOMEM; 1004 goto err_out; 1005 } 1006 1007 qp->attrs.sq_size = roundup_pow_of_two(attrs->cap.max_send_wr * 1008 ERDMA_MAX_WQEBB_PER_SQE); 1009 qp->attrs.rq_size = roundup_pow_of_two(attrs->cap.max_recv_wr); 1010 1011 if (uctx) { 1012 ret = ib_copy_from_udata(&ureq, udata, 1013 min(sizeof(ureq), udata->inlen)); 1014 if (ret) 1015 goto err_out_xa; 1016 1017 ret = init_user_qp(qp, uctx, ureq.qbuf_va, ureq.qbuf_len, 1018 ureq.db_record_va); 1019 if (ret) 1020 goto err_out_xa; 1021 1022 memset(&uresp, 0, sizeof(uresp)); 1023 1024 uresp.num_sqe = qp->attrs.sq_size; 1025 uresp.num_rqe = qp->attrs.rq_size; 1026 uresp.qp_id = QP_ID(qp); 1027 uresp.rq_offset = qp->user_qp.rq_offset; 1028 1029 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1030 if (ret) 1031 goto err_out_cmd; 1032 } else { 1033 init_kernel_qp(dev, qp, attrs); 1034 } 1035 1036 qp->attrs.max_send_sge = attrs->cap.max_send_sge; 1037 qp->attrs.max_recv_sge = attrs->cap.max_recv_sge; 1038 1039 if (erdma_device_iwarp(qp->dev)) 1040 qp->attrs.iwarp.state = ERDMA_QPS_IWARP_IDLE; 1041 else 1042 qp->attrs.rocev2.state = ERDMA_QPS_ROCEV2_RESET; 1043 1044 INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker); 1045 1046 ret = create_qp_cmd(uctx, qp); 1047 if (ret) 1048 goto err_out_cmd; 1049 1050 spin_lock_init(&qp->lock); 1051 1052 return 0; 1053 1054 err_out_cmd: 1055 if (uctx) 1056 free_user_qp(qp, uctx); 1057 else 1058 free_kernel_qp(qp); 1059 err_out_xa: 1060 xa_erase(&dev->qp_xa, QP_ID(qp)); 1061 err_out: 1062 return ret; 1063 } 1064 1065 static int erdma_create_stag(struct erdma_dev *dev, u32 *stag) 1066 { 1067 int stag_idx; 1068 1069 stag_idx = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX]); 1070 if (stag_idx < 0) 1071 return stag_idx; 1072 1073 /* For now, we always let key field be zero. */ 1074 *stag = (stag_idx << 8); 1075 1076 return 0; 1077 } 1078 1079 struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int acc) 1080 { 1081 struct erdma_dev *dev = to_edev(ibpd->device); 1082 struct erdma_mr *mr; 1083 u32 stag; 1084 int ret; 1085 1086 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1087 if (!mr) 1088 return ERR_PTR(-ENOMEM); 1089 1090 ret = erdma_create_stag(dev, &stag); 1091 if (ret) 1092 goto out_free; 1093 1094 mr->type = ERDMA_MR_TYPE_DMA; 1095 1096 mr->ibmr.lkey = stag; 1097 mr->ibmr.rkey = stag; 1098 mr->ibmr.pd = ibpd; 1099 mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(acc); 1100 ret = regmr_cmd(dev, mr); 1101 if (ret) 1102 goto out_remove_stag; 1103 1104 return &mr->ibmr; 1105 1106 out_remove_stag: 1107 erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], 1108 mr->ibmr.lkey >> 8); 1109 1110 out_free: 1111 kfree(mr); 1112 1113 return ERR_PTR(ret); 1114 } 1115 1116 struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type, 1117 u32 max_num_sg) 1118 { 1119 struct erdma_mr *mr; 1120 struct erdma_dev *dev = to_edev(ibpd->device); 1121 int ret; 1122 u32 stag; 1123 1124 if (mr_type != IB_MR_TYPE_MEM_REG) 1125 return ERR_PTR(-EOPNOTSUPP); 1126 1127 if (max_num_sg > ERDMA_MR_MAX_MTT_CNT) 1128 return ERR_PTR(-EINVAL); 1129 1130 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1131 if (!mr) 1132 return ERR_PTR(-ENOMEM); 1133 1134 ret = erdma_create_stag(dev, &stag); 1135 if (ret) 1136 goto out_free; 1137 1138 mr->type = ERDMA_MR_TYPE_FRMR; 1139 1140 mr->ibmr.lkey = stag; 1141 mr->ibmr.rkey = stag; 1142 mr->ibmr.pd = ibpd; 1143 /* update it in FRMR. 
*/ 1144 mr->access = ERDMA_MR_ACC_LR | ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR | 1145 ERDMA_MR_ACC_RW; 1146 1147 mr->mem.page_size = PAGE_SIZE; /* update it later. */ 1148 mr->mem.page_cnt = max_num_sg; 1149 mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true); 1150 if (IS_ERR(mr->mem.mtt)) { 1151 ret = PTR_ERR(mr->mem.mtt); 1152 goto out_remove_stag; 1153 } 1154 1155 ret = regmr_cmd(dev, mr); 1156 if (ret) 1157 goto out_destroy_mtt; 1158 1159 return &mr->ibmr; 1160 1161 out_destroy_mtt: 1162 erdma_destroy_mtt(dev, mr->mem.mtt); 1163 1164 out_remove_stag: 1165 erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], 1166 mr->ibmr.lkey >> 8); 1167 1168 out_free: 1169 kfree(mr); 1170 1171 return ERR_PTR(ret); 1172 } 1173 1174 static int erdma_set_page(struct ib_mr *ibmr, u64 addr) 1175 { 1176 struct erdma_mr *mr = to_emr(ibmr); 1177 1178 if (mr->mem.mtt_nents >= mr->mem.page_cnt) 1179 return -1; 1180 1181 mr->mem.mtt->buf[mr->mem.mtt_nents] = addr; 1182 mr->mem.mtt_nents++; 1183 1184 return 0; 1185 } 1186 1187 int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, 1188 unsigned int *sg_offset) 1189 { 1190 struct erdma_mr *mr = to_emr(ibmr); 1191 int num; 1192 1193 mr->mem.mtt_nents = 0; 1194 1195 num = ib_sg_to_pages(&mr->ibmr, sg, sg_nents, sg_offset, 1196 erdma_set_page); 1197 1198 return num; 1199 } 1200 1201 struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, 1202 u64 virt, int access, struct ib_udata *udata) 1203 { 1204 struct erdma_mr *mr = NULL; 1205 struct erdma_dev *dev = to_edev(ibpd->device); 1206 u32 stag; 1207 int ret; 1208 1209 if (!len || len > dev->attrs.max_mr_size) 1210 return ERR_PTR(-EINVAL); 1211 1212 mr = kzalloc(sizeof(*mr), GFP_KERNEL); 1213 if (!mr) 1214 return ERR_PTR(-ENOMEM); 1215 1216 ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt, 1217 SZ_2G - SZ_4K, false); 1218 if (ret) 1219 goto err_out_free; 1220 1221 ret = erdma_create_stag(dev, &stag); 1222 if (ret) 1223 goto err_out_put_mtt; 1224 1225 mr->ibmr.lkey = mr->ibmr.rkey = stag; 1226 mr->ibmr.pd = ibpd; 1227 mr->mem.va = virt; 1228 mr->mem.len = len; 1229 mr->access = ERDMA_MR_ACC_LR | to_erdma_access_flags(access); 1230 mr->valid = 1; 1231 mr->type = ERDMA_MR_TYPE_NORMAL; 1232 1233 ret = regmr_cmd(dev, mr); 1234 if (ret) 1235 goto err_out_mr; 1236 1237 return &mr->ibmr; 1238 1239 err_out_mr: 1240 erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], 1241 mr->ibmr.lkey >> 8); 1242 1243 err_out_put_mtt: 1244 put_mtt_entries(dev, &mr->mem); 1245 1246 err_out_free: 1247 kfree(mr); 1248 1249 return ERR_PTR(ret); 1250 } 1251 1252 int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) 1253 { 1254 struct erdma_mr *mr; 1255 struct erdma_dev *dev = to_edev(ibmr->device); 1256 struct erdma_cmdq_dereg_mr_req req; 1257 int ret; 1258 1259 mr = to_emr(ibmr); 1260 1261 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, 1262 CMDQ_OPCODE_DEREG_MR); 1263 1264 req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) | 1265 FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF); 1266 1267 ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, 1268 true); 1269 if (ret) 1270 return ret; 1271 1272 erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX], ibmr->lkey >> 8); 1273 1274 put_mtt_entries(dev, &mr->mem); 1275 1276 kfree(mr); 1277 return 0; 1278 } 1279 1280 int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) 1281 { 1282 struct erdma_cq *cq = to_ecq(ibcq); 1283 struct erdma_dev *dev = to_edev(ibcq->device); 1284 struct 
erdma_ucontext *ctx = rdma_udata_to_drv_context( 1285 udata, struct erdma_ucontext, ibucontext); 1286 int err; 1287 struct erdma_cmdq_destroy_cq_req req; 1288 1289 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, 1290 CMDQ_OPCODE_DESTROY_CQ); 1291 req.cqn = cq->cqn; 1292 1293 err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, 1294 true); 1295 if (err) 1296 return err; 1297 1298 if (rdma_is_kernel_res(&cq->ibcq.res)) { 1299 dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT, 1300 cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr); 1301 dma_pool_free(dev->db_pool, cq->kern_cq.dbrec, 1302 cq->kern_cq.dbrec_dma); 1303 } else { 1304 erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page); 1305 put_mtt_entries(dev, &cq->user_cq.qbuf_mem); 1306 } 1307 1308 xa_erase(&dev->cq_xa, cq->cqn); 1309 1310 return 0; 1311 } 1312 1313 int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) 1314 { 1315 struct erdma_qp *qp = to_eqp(ibqp); 1316 struct erdma_dev *dev = to_edev(ibqp->device); 1317 struct erdma_ucontext *ctx = rdma_udata_to_drv_context( 1318 udata, struct erdma_ucontext, ibucontext); 1319 struct erdma_cmdq_destroy_qp_req req; 1320 union erdma_mod_qp_params params; 1321 int err; 1322 1323 down_write(&qp->state_lock); 1324 if (erdma_device_iwarp(dev)) { 1325 params.iwarp.state = ERDMA_QPS_IWARP_ERROR; 1326 erdma_modify_qp_state_iwarp(qp, ¶ms.iwarp, 1327 ERDMA_QPA_IWARP_STATE); 1328 } else { 1329 params.rocev2.state = ERDMA_QPS_ROCEV2_ERROR; 1330 erdma_modify_qp_state_rocev2(qp, ¶ms.rocev2, 1331 ERDMA_QPA_ROCEV2_STATE); 1332 } 1333 up_write(&qp->state_lock); 1334 1335 cancel_delayed_work_sync(&qp->reflush_dwork); 1336 1337 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, 1338 CMDQ_OPCODE_DESTROY_QP); 1339 req.qpn = QP_ID(qp); 1340 1341 err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, 1342 true); 1343 if (err) 1344 return err; 1345 1346 erdma_qp_put(qp); 1347 wait_for_completion(&qp->safe_free); 1348 1349 if (rdma_is_kernel_res(&qp->ibqp.res)) { 1350 free_kernel_qp(qp); 1351 } else { 1352 put_mtt_entries(dev, &qp->user_qp.sq_mem); 1353 put_mtt_entries(dev, &qp->user_qp.rq_mem); 1354 erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page); 1355 } 1356 1357 if (qp->cep) 1358 erdma_cep_put(qp->cep); 1359 xa_erase(&dev->qp_xa, QP_ID(qp)); 1360 1361 return 0; 1362 } 1363 1364 void erdma_qp_get_ref(struct ib_qp *ibqp) 1365 { 1366 erdma_qp_get(to_eqp(ibqp)); 1367 } 1368 1369 void erdma_qp_put_ref(struct ib_qp *ibqp) 1370 { 1371 erdma_qp_put(to_eqp(ibqp)); 1372 } 1373 1374 int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma) 1375 { 1376 struct rdma_user_mmap_entry *rdma_entry; 1377 struct erdma_user_mmap_entry *entry; 1378 pgprot_t prot; 1379 int err; 1380 1381 rdma_entry = rdma_user_mmap_entry_get(ctx, vma); 1382 if (!rdma_entry) 1383 return -EINVAL; 1384 1385 entry = to_emmap(rdma_entry); 1386 1387 switch (entry->mmap_flag) { 1388 case ERDMA_MMAP_IO_NC: 1389 /* map doorbell. 
*/ 1390 prot = pgprot_device(vma->vm_page_prot); 1391 break; 1392 default: 1393 err = -EINVAL; 1394 goto put_entry; 1395 } 1396 1397 err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE, 1398 prot, rdma_entry); 1399 1400 put_entry: 1401 rdma_user_mmap_entry_put(rdma_entry); 1402 return err; 1403 } 1404 1405 void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry) 1406 { 1407 struct erdma_user_mmap_entry *entry = to_emmap(rdma_entry); 1408 1409 kfree(entry); 1410 } 1411 1412 static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx, 1413 bool ext_db_en) 1414 { 1415 struct erdma_cmdq_ext_db_req req = {}; 1416 u64 val0, val1; 1417 int ret; 1418 1419 /* 1420 * CAP_SYS_RAWIO is required if hardware does not support extend 1421 * doorbell mechanism. 1422 */ 1423 if (!ext_db_en && !capable(CAP_SYS_RAWIO)) 1424 return -EPERM; 1425 1426 if (!ext_db_en) { 1427 ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET; 1428 ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET; 1429 ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET; 1430 return 0; 1431 } 1432 1433 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON, 1434 CMDQ_OPCODE_ALLOC_DB); 1435 1436 req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) | 1437 FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) | 1438 FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1); 1439 1440 ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1, 1441 true); 1442 if (ret) 1443 return ret; 1444 1445 ctx->ext_db.enable = true; 1446 ctx->ext_db.sdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_SDB); 1447 ctx->ext_db.rdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_RDB); 1448 ctx->ext_db.cdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_CDB); 1449 1450 ctx->sdb = dev->func_bar_addr + (ctx->ext_db.sdb_off << PAGE_SHIFT); 1451 ctx->cdb = dev->func_bar_addr + (ctx->ext_db.rdb_off << PAGE_SHIFT); 1452 ctx->rdb = dev->func_bar_addr + (ctx->ext_db.cdb_off << PAGE_SHIFT); 1453 1454 return 0; 1455 } 1456 1457 static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx) 1458 { 1459 struct erdma_cmdq_ext_db_req req = {}; 1460 int ret; 1461 1462 if (!ctx->ext_db.enable) 1463 return; 1464 1465 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON, 1466 CMDQ_OPCODE_FREE_DB); 1467 1468 req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) | 1469 FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) | 1470 FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1); 1471 1472 req.sdb_off = ctx->ext_db.sdb_off; 1473 req.rdb_off = ctx->ext_db.rdb_off; 1474 req.cdb_off = ctx->ext_db.cdb_off; 1475 1476 ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, 1477 true); 1478 if (ret) 1479 ibdev_err_ratelimited(&dev->ibdev, 1480 "free db resources failed %d", ret); 1481 } 1482 1483 static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx) 1484 { 1485 rdma_user_mmap_entry_remove(uctx->sq_db_mmap_entry); 1486 rdma_user_mmap_entry_remove(uctx->rq_db_mmap_entry); 1487 rdma_user_mmap_entry_remove(uctx->cq_db_mmap_entry); 1488 } 1489 1490 int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata) 1491 { 1492 struct erdma_ucontext *ctx = to_ectx(ibctx); 1493 struct erdma_dev *dev = to_edev(ibctx->device); 1494 int ret; 1495 struct erdma_uresp_alloc_ctx uresp = {}; 1496 1497 if (atomic_inc_return(&dev->num_ctx) > ERDMA_MAX_CONTEXT) { 1498 ret = -ENOMEM; 1499 goto err_out; 1500 } 1501 1502 if (udata->outlen < sizeof(uresp)) { 1503 ret = -EINVAL; 1504 goto err_out; 1505 } 1506 1507 INIT_LIST_HEAD(&ctx->dbrecords_page_list); 
1508 mutex_init(&ctx->dbrecords_page_mutex); 1509 1510 ret = alloc_db_resources(dev, ctx, 1511 !!(dev->attrs.cap_flags & 1512 ERDMA_DEV_CAP_FLAGS_EXTEND_DB)); 1513 if (ret) 1514 goto err_out; 1515 1516 ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert( 1517 ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb); 1518 if (!ctx->sq_db_mmap_entry) { 1519 ret = -ENOMEM; 1520 goto err_free_ext_db; 1521 } 1522 1523 ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert( 1524 ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb); 1525 if (!ctx->rq_db_mmap_entry) { 1526 ret = -EINVAL; 1527 goto err_put_mmap_entries; 1528 } 1529 1530 ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert( 1531 ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb); 1532 if (!ctx->cq_db_mmap_entry) { 1533 ret = -EINVAL; 1534 goto err_put_mmap_entries; 1535 } 1536 1537 uresp.dev_id = dev->pdev->device; 1538 1539 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1540 if (ret) 1541 goto err_put_mmap_entries; 1542 1543 return 0; 1544 1545 err_put_mmap_entries: 1546 erdma_uctx_user_mmap_entries_remove(ctx); 1547 1548 err_free_ext_db: 1549 free_db_resources(dev, ctx); 1550 1551 err_out: 1552 atomic_dec(&dev->num_ctx); 1553 return ret; 1554 } 1555 1556 void erdma_dealloc_ucontext(struct ib_ucontext *ibctx) 1557 { 1558 struct erdma_dev *dev = to_edev(ibctx->device); 1559 struct erdma_ucontext *ctx = to_ectx(ibctx); 1560 1561 erdma_uctx_user_mmap_entries_remove(ctx); 1562 free_db_resources(dev, ctx); 1563 atomic_dec(&dev->num_ctx); 1564 } 1565 1566 static void erdma_attr_to_av(const struct rdma_ah_attr *ah_attr, 1567 struct erdma_av *av, u16 sport) 1568 { 1569 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); 1570 1571 av->port = rdma_ah_get_port_num(ah_attr); 1572 av->sgid_index = grh->sgid_index; 1573 av->hop_limit = grh->hop_limit; 1574 av->traffic_class = grh->traffic_class; 1575 av->sl = rdma_ah_get_sl(ah_attr); 1576 1577 av->flow_label = grh->flow_label; 1578 av->udp_sport = sport; 1579 1580 ether_addr_copy(av->dmac, ah_attr->roce.dmac); 1581 memcpy(av->dgid, grh->dgid.raw, ERDMA_ROCEV2_GID_SIZE); 1582 1583 if (ipv6_addr_v4mapped((struct in6_addr *)&grh->dgid)) 1584 av->ntype = ERDMA_NETWORK_TYPE_IPV4; 1585 else 1586 av->ntype = ERDMA_NETWORK_TYPE_IPV6; 1587 } 1588 1589 static void erdma_av_to_attr(struct erdma_av *av, struct rdma_ah_attr *ah_attr) 1590 { 1591 ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE; 1592 1593 rdma_ah_set_sl(ah_attr, av->sl); 1594 rdma_ah_set_port_num(ah_attr, av->port); 1595 rdma_ah_set_ah_flags(ah_attr, IB_AH_GRH); 1596 1597 rdma_ah_set_grh(ah_attr, NULL, av->flow_label, av->sgid_index, 1598 av->hop_limit, av->traffic_class); 1599 rdma_ah_set_dgid_raw(ah_attr, av->dgid); 1600 } 1601 1602 static int ib_qps_to_erdma_qps[ERDMA_PROTO_COUNT][IB_QPS_ERR + 1] = { 1603 [ERDMA_PROTO_IWARP] = { 1604 [IB_QPS_RESET] = ERDMA_QPS_IWARP_IDLE, 1605 [IB_QPS_INIT] = ERDMA_QPS_IWARP_IDLE, 1606 [IB_QPS_RTR] = ERDMA_QPS_IWARP_RTR, 1607 [IB_QPS_RTS] = ERDMA_QPS_IWARP_RTS, 1608 [IB_QPS_SQD] = ERDMA_QPS_IWARP_CLOSING, 1609 [IB_QPS_SQE] = ERDMA_QPS_IWARP_TERMINATE, 1610 [IB_QPS_ERR] = ERDMA_QPS_IWARP_ERROR, 1611 }, 1612 [ERDMA_PROTO_ROCEV2] = { 1613 [IB_QPS_RESET] = ERDMA_QPS_ROCEV2_RESET, 1614 [IB_QPS_INIT] = ERDMA_QPS_ROCEV2_INIT, 1615 [IB_QPS_RTR] = ERDMA_QPS_ROCEV2_RTR, 1616 [IB_QPS_RTS] = ERDMA_QPS_ROCEV2_RTS, 1617 [IB_QPS_SQD] = ERDMA_QPS_ROCEV2_SQD, 1618 [IB_QPS_SQE] = ERDMA_QPS_ROCEV2_SQE, 1619 [IB_QPS_ERR] = ERDMA_QPS_ROCEV2_ERROR, 1620 }, 1621 }; 1622 1623 static int 
erdma_qps_to_ib_qps[ERDMA_PROTO_COUNT][ERDMA_QPS_ROCEV2_COUNT] = { 1624 [ERDMA_PROTO_IWARP] = { 1625 [ERDMA_QPS_IWARP_IDLE] = IB_QPS_INIT, 1626 [ERDMA_QPS_IWARP_RTR] = IB_QPS_RTR, 1627 [ERDMA_QPS_IWARP_RTS] = IB_QPS_RTS, 1628 [ERDMA_QPS_IWARP_CLOSING] = IB_QPS_ERR, 1629 [ERDMA_QPS_IWARP_TERMINATE] = IB_QPS_ERR, 1630 [ERDMA_QPS_IWARP_ERROR] = IB_QPS_ERR, 1631 }, 1632 [ERDMA_PROTO_ROCEV2] = { 1633 [ERDMA_QPS_ROCEV2_RESET] = IB_QPS_RESET, 1634 [ERDMA_QPS_ROCEV2_INIT] = IB_QPS_INIT, 1635 [ERDMA_QPS_ROCEV2_RTR] = IB_QPS_RTR, 1636 [ERDMA_QPS_ROCEV2_RTS] = IB_QPS_RTS, 1637 [ERDMA_QPS_ROCEV2_SQD] = IB_QPS_SQD, 1638 [ERDMA_QPS_ROCEV2_SQE] = IB_QPS_SQE, 1639 [ERDMA_QPS_ROCEV2_ERROR] = IB_QPS_ERR, 1640 }, 1641 }; 1642 1643 static inline enum erdma_qps_iwarp ib_to_iwarp_qps(enum ib_qp_state state) 1644 { 1645 return ib_qps_to_erdma_qps[ERDMA_PROTO_IWARP][state]; 1646 } 1647 1648 static inline enum erdma_qps_rocev2 ib_to_rocev2_qps(enum ib_qp_state state) 1649 { 1650 return ib_qps_to_erdma_qps[ERDMA_PROTO_ROCEV2][state]; 1651 } 1652 1653 static inline enum ib_qp_state iwarp_to_ib_qps(enum erdma_qps_iwarp state) 1654 { 1655 return erdma_qps_to_ib_qps[ERDMA_PROTO_IWARP][state]; 1656 } 1657 1658 static inline enum ib_qp_state rocev2_to_ib_qps(enum erdma_qps_rocev2 state) 1659 { 1660 return erdma_qps_to_ib_qps[ERDMA_PROTO_ROCEV2][state]; 1661 } 1662 1663 static int erdma_check_qp_attrs(struct erdma_qp *qp, struct ib_qp_attr *attr, 1664 int attr_mask) 1665 { 1666 enum ib_qp_state cur_state, nxt_state; 1667 struct erdma_dev *dev = qp->dev; 1668 int ret = -EINVAL; 1669 1670 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) { 1671 ret = -EOPNOTSUPP; 1672 goto out; 1673 } 1674 1675 if ((attr_mask & IB_QP_PORT) && 1676 !rdma_is_port_valid(&dev->ibdev, attr->port_num)) 1677 goto out; 1678 1679 if (erdma_device_rocev2(dev)) { 1680 cur_state = (attr_mask & IB_QP_CUR_STATE) ? 1681 attr->cur_qp_state : 1682 rocev2_to_ib_qps(qp->attrs.rocev2.state); 1683 1684 nxt_state = (attr_mask & IB_QP_STATE) ? 
attr->qp_state : 1685 cur_state; 1686 1687 if (!ib_modify_qp_is_ok(cur_state, nxt_state, qp->ibqp.qp_type, 1688 attr_mask)) 1689 goto out; 1690 1691 if ((attr_mask & IB_QP_AV) && 1692 erdma_check_gid_attr( 1693 rdma_ah_read_grh(&attr->ah_attr)->sgid_attr)) 1694 goto out; 1695 1696 if ((attr_mask & IB_QP_PKEY_INDEX) && 1697 attr->pkey_index >= ERDMA_MAX_PKEYS) 1698 goto out; 1699 } 1700 1701 return 0; 1702 1703 out: 1704 return ret; 1705 } 1706 1707 static void erdma_init_mod_qp_params_rocev2( 1708 struct erdma_qp *qp, struct erdma_mod_qp_params_rocev2 *params, 1709 int *erdma_attr_mask, struct ib_qp_attr *attr, int ib_attr_mask) 1710 { 1711 enum erdma_qpa_mask_rocev2 to_modify_attrs = 0; 1712 enum erdma_qps_rocev2 cur_state, nxt_state; 1713 u16 udp_sport; 1714 1715 if (ib_attr_mask & IB_QP_CUR_STATE) 1716 cur_state = ib_to_rocev2_qps(attr->cur_qp_state); 1717 else 1718 cur_state = qp->attrs.rocev2.state; 1719 1720 if (ib_attr_mask & IB_QP_STATE) 1721 nxt_state = ib_to_rocev2_qps(attr->qp_state); 1722 else 1723 nxt_state = cur_state; 1724 1725 to_modify_attrs |= ERDMA_QPA_ROCEV2_STATE; 1726 params->state = nxt_state; 1727 1728 if (ib_attr_mask & IB_QP_QKEY) { 1729 to_modify_attrs |= ERDMA_QPA_ROCEV2_QKEY; 1730 params->qkey = attr->qkey; 1731 } 1732 1733 if (ib_attr_mask & IB_QP_SQ_PSN) { 1734 to_modify_attrs |= ERDMA_QPA_ROCEV2_SQ_PSN; 1735 params->sq_psn = attr->sq_psn; 1736 } 1737 1738 if (ib_attr_mask & IB_QP_RQ_PSN) { 1739 to_modify_attrs |= ERDMA_QPA_ROCEV2_RQ_PSN; 1740 params->rq_psn = attr->rq_psn; 1741 } 1742 1743 if (ib_attr_mask & IB_QP_DEST_QPN) { 1744 to_modify_attrs |= ERDMA_QPA_ROCEV2_DST_QPN; 1745 params->dst_qpn = attr->dest_qp_num; 1746 } 1747 1748 if (ib_attr_mask & IB_QP_AV) { 1749 to_modify_attrs |= ERDMA_QPA_ROCEV2_AV; 1750 udp_sport = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, 1751 QP_ID(qp), params->dst_qpn); 1752 erdma_attr_to_av(&attr->ah_attr, ¶ms->av, udp_sport); 1753 } 1754 1755 *erdma_attr_mask = to_modify_attrs; 1756 } 1757 1758 int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 1759 struct ib_udata *udata) 1760 { 1761 struct erdma_qp *qp = to_eqp(ibqp); 1762 union erdma_mod_qp_params params; 1763 int ret = 0, erdma_attr_mask = 0; 1764 1765 down_write(&qp->state_lock); 1766 1767 ret = erdma_check_qp_attrs(qp, attr, attr_mask); 1768 if (ret) 1769 goto out; 1770 1771 if (erdma_device_iwarp(qp->dev)) { 1772 if (attr_mask & IB_QP_STATE) { 1773 erdma_attr_mask |= ERDMA_QPA_IWARP_STATE; 1774 params.iwarp.state = ib_to_iwarp_qps(attr->qp_state); 1775 } 1776 1777 ret = erdma_modify_qp_state_iwarp(qp, ¶ms.iwarp, 1778 erdma_attr_mask); 1779 } else { 1780 erdma_init_mod_qp_params_rocev2( 1781 qp, ¶ms.rocev2, &erdma_attr_mask, attr, attr_mask); 1782 1783 ret = erdma_modify_qp_state_rocev2(qp, ¶ms.rocev2, 1784 erdma_attr_mask); 1785 } 1786 1787 out: 1788 up_write(&qp->state_lock); 1789 return ret; 1790 } 1791 1792 static enum ib_qp_state query_qp_state(struct erdma_qp *qp) 1793 { 1794 if (erdma_device_iwarp(qp->dev)) 1795 return iwarp_to_ib_qps(qp->attrs.iwarp.state); 1796 else 1797 return rocev2_to_ib_qps(qp->attrs.rocev2.state); 1798 } 1799 1800 int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 1801 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) 1802 { 1803 struct erdma_cmdq_query_qp_req_rocev2 req; 1804 struct erdma_dev *dev; 1805 struct erdma_qp *qp; 1806 u64 resp0, resp1; 1807 int ret; 1808 1809 if (ibqp && qp_attr && qp_init_attr) { 1810 qp = to_eqp(ibqp); 1811 dev = to_edev(ibqp->device); 1812 } else { 1813 
return -EINVAL; 1814 } 1815 1816 qp_attr->cap.max_inline_data = ERDMA_MAX_INLINE; 1817 qp_init_attr->cap.max_inline_data = ERDMA_MAX_INLINE; 1818 1819 qp_attr->cap.max_send_wr = qp->attrs.sq_size; 1820 qp_attr->cap.max_recv_wr = qp->attrs.rq_size; 1821 qp_attr->cap.max_send_sge = qp->attrs.max_send_sge; 1822 qp_attr->cap.max_recv_sge = qp->attrs.max_recv_sge; 1823 1824 qp_attr->path_mtu = ib_mtu_int_to_enum(dev->netdev->mtu); 1825 qp_attr->max_rd_atomic = qp->attrs.irq_size; 1826 qp_attr->max_dest_rd_atomic = qp->attrs.orq_size; 1827 1828 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | 1829 IB_ACCESS_REMOTE_WRITE | 1830 IB_ACCESS_REMOTE_READ; 1831 1832 qp_init_attr->cap = qp_attr->cap; 1833 1834 if (erdma_device_rocev2(dev)) { 1835 /* Query hardware to get some attributes */ 1836 erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, 1837 CMDQ_OPCODE_QUERY_QP); 1838 req.qpn = QP_ID(qp); 1839 1840 ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, 1841 &resp1, true); 1842 if (ret) 1843 return ret; 1844 1845 qp_attr->sq_psn = 1846 FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_SQ_PSN_MASK, resp0); 1847 qp_attr->rq_psn = 1848 FIELD_GET(ERDMA_CMD_QUERY_QP_RESP_RQ_PSN_MASK, resp0); 1849 qp_attr->qp_state = rocev2_to_ib_qps(FIELD_GET( 1850 ERDMA_CMD_QUERY_QP_RESP_QP_STATE_MASK, resp0)); 1851 qp_attr->cur_qp_state = qp_attr->qp_state; 1852 qp_attr->sq_draining = FIELD_GET( 1853 ERDMA_CMD_QUERY_QP_RESP_SQ_DRAINING_MASK, resp0); 1854 1855 qp_attr->pkey_index = 0; 1856 qp_attr->dest_qp_num = qp->attrs.rocev2.dst_qpn; 1857 1858 if (qp->ibqp.qp_type == IB_QPT_RC) 1859 erdma_av_to_attr(&qp->attrs.rocev2.av, 1860 &qp_attr->ah_attr); 1861 } else { 1862 qp_attr->qp_state = query_qp_state(qp); 1863 qp_attr->cur_qp_state = qp_attr->qp_state; 1864 } 1865 1866 return 0; 1867 } 1868 1869 static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq, 1870 struct erdma_ureq_create_cq *ureq) 1871 { 1872 int ret; 1873 struct erdma_dev *dev = to_edev(cq->ibcq.device); 1874 1875 ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va, 1876 ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K, 1877 true); 1878 if (ret) 1879 return ret; 1880 1881 ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va, 1882 &cq->user_cq.user_dbr_page, 1883 &cq->user_cq.dbrec_dma); 1884 if (ret) 1885 put_mtt_entries(dev, &cq->user_cq.qbuf_mem); 1886 1887 return ret; 1888 } 1889 1890 static int erdma_init_kernel_cq(struct erdma_cq *cq) 1891 { 1892 struct erdma_dev *dev = to_edev(cq->ibcq.device); 1893 1894 cq->kern_cq.qbuf = 1895 dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT, 1896 &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL); 1897 if (!cq->kern_cq.qbuf) 1898 return -ENOMEM; 1899 1900 cq->kern_cq.dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, 1901 &cq->kern_cq.dbrec_dma); 1902 if (!cq->kern_cq.dbrec) 1903 goto err_out; 1904 1905 spin_lock_init(&cq->kern_cq.lock); 1906 /* use default cqdb addr */ 1907 cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET; 1908 1909 return 0; 1910 1911 err_out: 1912 dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT, 1913 cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr); 1914 1915 return -ENOMEM; 1916 } 1917 1918 int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, 1919 struct uverbs_attr_bundle *attrs) 1920 { 1921 struct ib_udata *udata = &attrs->driver_udata; 1922 struct erdma_cq *cq = to_ecq(ibcq); 1923 struct erdma_dev *dev = to_edev(ibcq->device); 1924 unsigned int depth = attr->cqe; 1925 int ret; 1926 struct erdma_ucontext *ctx = 
rdma_udata_to_drv_context(
			udata, struct erdma_ucontext, ibucontext);

	if (depth > dev->attrs.max_cqe)
		return -EINVAL;

	depth = roundup_pow_of_two(depth);
	cq->ibcq.cqe = depth;
	cq->depth = depth;
	cq->assoc_eqn = attr->comp_vector + 1;

	ret = xa_alloc_cyclic(&dev->cq_xa, &cq->cqn, cq,
			      XA_LIMIT(1, dev->attrs.max_cq - 1),
			      &dev->next_alloc_cqn, GFP_KERNEL);
	if (ret < 0)
		return ret;

	if (!rdma_is_kernel_res(&ibcq->res)) {
		struct erdma_ureq_create_cq ureq;
		struct erdma_uresp_create_cq uresp;

		ret = ib_copy_from_udata(&ureq, udata,
					 min(udata->inlen, sizeof(ureq)));
		if (ret)
			goto err_out_xa;

		ret = erdma_init_user_cq(ctx, cq, &ureq);
		if (ret)
			goto err_out_xa;

		uresp.cq_id = cq->cqn;
		uresp.num_cqe = depth;

		ret = ib_copy_to_udata(udata, &uresp,
				       min(sizeof(uresp), udata->outlen));
		if (ret)
			goto err_free_res;
	} else {
		ret = erdma_init_kernel_cq(cq);
		if (ret)
			goto err_out_xa;
	}

	ret = create_cq_cmd(ctx, cq);
	if (ret)
		goto err_free_res;

	return 0;

err_free_res:
	if (!rdma_is_kernel_res(&ibcq->res)) {
		erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
		put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
	} else {
		dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
				  cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
		dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
			      cq->kern_cq.dbrec_dma);
	}

err_out_xa:
	xa_erase(&dev->cq_xa, cq->cqn);

	return ret;
}

void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
{
	struct erdma_cmdq_config_mtu_req req;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CONF_MTU);
	req.mtu = mtu;

	erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, true);
}

void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
{
	struct ib_event event;

	event.device = &dev->ibdev;
	event.element.port_num = 1;
	event.event = reason;

	ib_dispatch_event(&event);
}

enum counters {
	ERDMA_STATS_TX_REQS_CNT,
	ERDMA_STATS_TX_PACKETS_CNT,
	ERDMA_STATS_TX_BYTES_CNT,
	ERDMA_STATS_TX_DISABLE_DROP_CNT,
	ERDMA_STATS_TX_BPS_METER_DROP_CNT,
	ERDMA_STATS_TX_PPS_METER_DROP_CNT,

	ERDMA_STATS_RX_PACKETS_CNT,
	ERDMA_STATS_RX_BYTES_CNT,
	ERDMA_STATS_RX_DISABLE_DROP_CNT,
	ERDMA_STATS_RX_BPS_METER_DROP_CNT,
	ERDMA_STATS_RX_PPS_METER_DROP_CNT,

	ERDMA_STATS_MAX
};

static const struct rdma_stat_desc erdma_descs[] = {
	[ERDMA_STATS_TX_REQS_CNT].name = "tx_reqs_cnt",
	[ERDMA_STATS_TX_PACKETS_CNT].name = "tx_packets_cnt",
	[ERDMA_STATS_TX_BYTES_CNT].name = "tx_bytes_cnt",
	[ERDMA_STATS_TX_DISABLE_DROP_CNT].name = "tx_disable_drop_cnt",
	[ERDMA_STATS_TX_BPS_METER_DROP_CNT].name = "tx_bps_limit_drop_cnt",
	[ERDMA_STATS_TX_PPS_METER_DROP_CNT].name = "tx_pps_limit_drop_cnt",
	[ERDMA_STATS_RX_PACKETS_CNT].name = "rx_packets_cnt",
	[ERDMA_STATS_RX_BYTES_CNT].name = "rx_bytes_cnt",
	[ERDMA_STATS_RX_DISABLE_DROP_CNT].name = "rx_disable_drop_cnt",
	[ERDMA_STATS_RX_BPS_METER_DROP_CNT].name = "rx_bps_limit_drop_cnt",
	[ERDMA_STATS_RX_PPS_METER_DROP_CNT].name = "rx_pps_limit_drop_cnt",
};

struct rdma_hw_stats *erdma_alloc_hw_port_stats(struct ib_device *device,
						u32 port_num)
{
	return rdma_alloc_hw_stats_struct(erdma_descs, ERDMA_STATS_MAX,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int erdma_query_hw_stats(struct erdma_dev *dev,
				struct rdma_hw_stats *stats)
{
	struct erdma_cmdq_query_stats_resp *resp;
	struct erdma_cmdq_query_req req;
	dma_addr_t dma_addr;
	int err;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_GET_STATS);

	resp = dma_pool_zalloc(dev->resp_pool, GFP_KERNEL, &dma_addr);
	if (!resp)
		return -ENOMEM;

	req.target_addr = dma_addr;
	req.target_length = ERDMA_HW_RESP_SIZE;

	err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				  true);
	if (err)
		goto out;

	if (resp->hdr.magic != ERDMA_HW_RESP_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&stats->value[0], &resp->tx_req_cnt,
	       sizeof(u64) * stats->num_counters);

out:
	dma_pool_free(dev->resp_pool, resp, dma_addr);

	return err;
}

int erdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		       u32 port, int index)
{
	struct erdma_dev *dev = to_edev(ibdev);
	int ret;

	if (port == 0)
		return 0;

	ret = erdma_query_hw_stats(dev, stats);
	if (ret)
		return ret;

	return stats->num_counters;
}

enum rdma_link_layer erdma_get_link_layer(struct ib_device *ibdev, u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int erdma_set_gid(struct erdma_dev *dev, u8 op, u32 idx,
			 const union ib_gid *gid)
{
	struct erdma_cmdq_set_gid_req req;
	u8 ntype;

	req.cfg = FIELD_PREP(ERDMA_CMD_SET_GID_SGID_IDX_MASK, idx) |
		  FIELD_PREP(ERDMA_CMD_SET_GID_OP_MASK, op);

	if (op == ERDMA_SET_GID_OP_ADD) {
		if (ipv6_addr_v4mapped((struct in6_addr *)gid))
			ntype = ERDMA_NETWORK_TYPE_IPV4;
		else
			ntype = ERDMA_NETWORK_TYPE_IPV6;

		req.cfg |= FIELD_PREP(ERDMA_CMD_SET_GID_NTYPE_MASK, ntype);

		memcpy(&req.gid, gid, ERDMA_ROCEV2_GID_SIZE);
	}

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_SET_GID);
	return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				   true);
}

int erdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct erdma_dev *dev = to_edev(attr->device);
	int ret;

	ret = erdma_check_gid_attr(attr);
	if (ret)
		return ret;

	return erdma_set_gid(dev, ERDMA_SET_GID_OP_ADD, attr->index,
			     &attr->gid);
}

int erdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
	return erdma_set_gid(to_edev(attr->device), ERDMA_SET_GID_OP_DEL,
			     attr->index, NULL);
}

int erdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	if (index >= ERDMA_MAX_PKEYS)
		return -EINVAL;

	*pkey = ERDMA_DEFAULT_PKEY;
	return 0;
}

void erdma_set_av_cfg(struct erdma_av_cfg *av_cfg, struct erdma_av *av)
{
	av_cfg->cfg0 = FIELD_PREP(ERDMA_CMD_CREATE_AV_FL_MASK, av->flow_label) |
		       FIELD_PREP(ERDMA_CMD_CREATE_AV_NTYPE_MASK, av->ntype);

	av_cfg->traffic_class = av->traffic_class;
	av_cfg->hop_limit = av->hop_limit;
	av_cfg->sl = av->sl;

	av_cfg->udp_sport = av->udp_sport;
	av_cfg->sgid_index = av->sgid_index;

	ether_addr_copy(av_cfg->dmac, av->dmac);
	memcpy(av_cfg->dgid, av->dgid, ERDMA_ROCEV2_GID_SIZE);
}

int erdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
		    struct ib_udata *udata)
{
	const struct ib_global_route *grh =
		rdma_ah_read_grh(init_attr->ah_attr);
	struct erdma_dev *dev = to_edev(ibah->device);
	struct erdma_pd *pd = to_epd(ibah->pd);
	struct erdma_ah *ah = to_eah(ibah);
	struct erdma_cmdq_create_ah_req req;
	u32 udp_sport;
	int ret;

	ret = erdma_check_gid_attr(grh->sgid_attr);
	if (ret)
		return ret;

	ret = erdma_alloc_idx(&dev->res_cb[ERDMA_RES_TYPE_AH]);
	if (ret < 0)
		return ret;

	ah->ahn = ret;

	if (grh->flow_label)
		udp_sport = rdma_flow_label_to_udp_sport(grh->flow_label);
	else
		udp_sport =
			IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + (ah->ahn & 0x3FFF);

	erdma_attr_to_av(init_attr->ah_attr, &ah->av, udp_sport);

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_CREATE_AH);

	req.pdn = pd->pdn;
	req.ahn = ah->ahn;
	erdma_set_av_cfg(&req.av_cfg, &ah->av);

	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				  init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
	if (ret) {
		erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);
		return ret;
	}

	return 0;
}

int erdma_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct erdma_dev *dev = to_edev(ibah->device);
	struct erdma_pd *pd = to_epd(ibah->pd);
	struct erdma_ah *ah = to_eah(ibah);
	struct erdma_cmdq_destroy_ah_req req;
	int ret;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_DESTROY_AH);

	req.pdn = pd->pdn;
	req.ahn = ah->ahn;

	ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL,
				  flags & RDMA_DESTROY_AH_SLEEPABLE);
	if (ret)
		return ret;

	erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_AH], ah->ahn);

	return 0;
}

int erdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
{
	struct erdma_ah *ah = to_eah(ibah);

	memset(ah_attr, 0, sizeof(*ah_attr));
	erdma_av_to_attr(&ah->av, ah_attr);

	return 0;
}