// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>
#define UVERBS_MODULE_NAME efa_ib
#include <rdma/uverbs_named_ioctl.h>
#include <rdma/ib_user_ioctl_cmds.h>

#include "efa.h"
#include "efa_io_defs.h"

enum {
	EFA_MMAP_DMA_PAGE = 0,
	EFA_MMAP_IO_WC,
	EFA_MMAP_IO_NC,
};

struct efa_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

#define EFA_DEFINE_DEVICE_STATS(op) \
	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
	op(EFA_COMPLETED_CMDS, "completed_cmds") \
	op(EFA_CMDS_ERR, "cmds_err") \
	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
	op(EFA_CREATE_QP_ERR, "create_qp_err") \
	op(EFA_CREATE_CQ_ERR, "create_cq_err") \
	op(EFA_REG_MR_ERR, "reg_mr_err") \
	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
	op(EFA_CREATE_AH_ERR, "create_ah_err") \
	op(EFA_MMAP_ERR, "mmap_err")

#define EFA_DEFINE_PORT_STATS(op) \
	op(EFA_TX_BYTES, "tx_bytes") \
	op(EFA_TX_PKTS, "tx_pkts") \
	op(EFA_RX_BYTES, "rx_bytes") \
	op(EFA_RX_PKTS, "rx_pkts") \
	op(EFA_RX_DROPS, "rx_drops") \
	op(EFA_SEND_BYTES, "send_bytes") \
	op(EFA_SEND_WRS, "send_wrs") \
	op(EFA_RECV_BYTES, "recv_bytes") \
	op(EFA_RECV_WRS, "recv_wrs") \
	op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
	op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
	op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
	op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
	op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
	op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
	op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
	op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \

#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, nam) \
	[ename].name = nam,

enum efa_hw_device_stats {
	EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
};

static const struct rdma_stat_desc efa_device_stats_descs[] = {
	EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
};

enum efa_hw_port_stats {
	EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
};

static const struct rdma_stat_desc efa_port_stats_descs[] = {
	EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
};

#define EFA_CHUNK_PAYLOAD_SHIFT       12
#define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE    8

#define EFA_CHUNK_SHIFT               12
#define EFA_CHUNK_SIZE                BIT(EFA_CHUNK_SHIFT)
#define EFA_CHUNK_PTR_SIZE            sizeof(struct efa_com_ctrl_buff_info)

#define EFA_PTRS_PER_CHUNK \
	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)

#define EFA_CHUNK_USED_SIZE \
	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)

struct pbl_chunk {
	dma_addr_t dma_addr;
	u64 *buf;
	u32 length;
};

struct pbl_chunk_list {
	struct pbl_chunk *chunks;
	unsigned int size;
};

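/*
 * Page buffer list context. The PBL itself (the array of page DMA
 * addresses handed to the device) lives in pbl_buf; phys describes how
 * that buffer is presented to the device: either as one physically
 * contiguous buffer (continuous) or as a chunk list built from its
 * scattered pages (indirect).
 */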
struct pbl_context {
	union {
		struct {
			dma_addr_t dma_addr;
		} continuous;
		struct {
			u32 pbl_buf_size_in_pages;
			struct scatterlist *sgl;
			int sg_dma_cnt;
			struct pbl_chunk_list chunk_list;
		} indirect;
	} phys;
	u64 *pbl_buf;
	u32 pbl_buf_size_in_bytes;
	u8 physically_continuous;
};

static inline struct efa_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}

static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct efa_mr, ibmr);
}

static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct efa_qp, ibqp);
}

static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct efa_cq, ibcq);
}

static inline struct efa_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct efa_ah, ibah);
}

static inline struct efa_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
}

#define EFA_DEV_CAP(dev, cap) \
	((dev)->dev_attr.device_caps & \
	 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)

#define is_reserved_cleared(reserved) \
	!memchr_inv(reserved, 0, sizeof(reserved))

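/*
 * Allocate a physically contiguous, zeroed buffer and map it for DMA.
 * Buffers allocated here are released with efa_free_mapped().
 */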
static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
{
	void *addr;

	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return NULL;

	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;
}

static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
			    dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
	free_pages_exact(cpu_addr, size);
}

int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
{
	struct efa_com_get_device_attr_result *dev_attr;
	struct efa_ibv_ex_query_device_resp resp = {};
	struct efa_dev *dev = to_edev(ibdev);
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	dev_attr = &dev->dev_attr;

	memset(props, 0, sizeof(*props));
	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
	props->page_size_cap = dev_attr->page_size_cap;
	props->vendor_id = dev->pdev->vendor;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->pdev->subsystem_device;
	props->max_qp = dev_attr->max_qp;
	props->max_cq = dev_attr->max_cq;
	props->max_pd = dev_attr->max_pd;
	props->max_mr = dev_attr->max_mr;
	props->max_ah = dev_attr->max_ah;
	props->max_cqe = dev_attr->max_cq_depth;
	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
				 dev_attr->max_rq_depth);
	props->max_send_sge = dev_attr->max_sq_sge;
	props->max_recv_sge = dev_attr->max_rq_sge;
	props->max_sge_rd = dev_attr->max_wr_rdma_sge;
	props->max_pkeys = 1;

	if (udata && udata->outlen) {
		resp.max_sq_sge = dev_attr->max_sq_sge;
		resp.max_rq_sge = dev_attr->max_rq_sge;
		resp.max_sq_wr = dev_attr->max_sq_depth;
		resp.max_rq_wr = dev_attr->max_rq_depth;
		resp.max_rdma_size = dev_attr->max_rdma_size;

		resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
		if (EFA_DEV_CAP(dev, RDMA_READ))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;

		if (EFA_DEV_CAP(dev, RNR_RETRY))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;

		if (EFA_DEV_CAP(dev, DATA_POLLING_128))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128;

		if (EFA_DEV_CAP(dev, RDMA_WRITE))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;

		if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;

		if (dev->neqs)
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;

		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for query_device\n");
			return err;
		}
	}

	return 0;
}

int efa_query_port(struct ib_device *ibdev, u32 port,
		   struct ib_port_attr *props)
{
	struct efa_dev *dev = to_edev(ibdev);

	props->lmc = 1;

	props->state = IB_PORT_ACTIVE;
	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_speed = IB_SPEED_EDR;
	props->active_width = IB_WIDTH_4X;
	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->max_msg_sz = dev->dev_attr.mtu;
	props->max_vl_num = 1;

	return 0;
}

int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_query_qp_params params = {};
	struct efa_com_query_qp_result result;
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

#define EFA_QUERY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)

	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
	if (err)
		return err;

	qp_attr->qp_state = result.qp_state;
	qp_attr->qkey = result.qkey;
	qp_attr->sq_psn = result.sq_psn;
	qp_attr->sq_draining = result.sq_draining;
	qp_attr->port_num = 1;
	qp_attr->rnr_retry = result.rnr_retry;

	qp_attr->cap.max_send_wr = qp->max_send_wr;
	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
	qp_attr->cap.max_send_sge = qp->max_send_sge;
	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
		  union ib_gid *gid)
{
	struct efa_dev *dev = to_edev(ibdev);

	memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));

	return 0;
}

int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
		   u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
{
	struct efa_com_dealloc_pd_params params = {
		.pdn = pdn,
	};

	return efa_com_dealloc_pd(&dev->edev, &params);
}

int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_alloc_pd_resp resp = {};
	struct efa_com_alloc_pd_result result;
	struct efa_pd *pd = to_epd(ibpd);
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	err = efa_com_alloc_pd(&dev->edev, &result);
	if (err)
		goto err_out;

	pd->pdn = result.pdn;
	resp.pdn = result.pdn;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for alloc_pd\n");
			goto err_dealloc_pd;
		}
	}

	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);

	return 0;

err_dealloc_pd:
	efa_pd_dealloc(dev, result.pdn);
err_out:
	atomic64_inc(&dev->stats.alloc_pd_err);
	return err;
}

int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_pd *pd = to_epd(ibpd);

	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
	efa_pd_dealloc(dev, pd->pdn);
	return 0;
}

static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
{
	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };

	return efa_com_destroy_qp(&dev->edev, &params);
}

static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
{
	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
	rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
}

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);

	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	efa_qp_user_mmap_entries_remove(qp);

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size,
			  &qp->rq_dma_addr);
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
	}

	return 0;
}

static struct rdma_user_mmap_entry*
efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
			   u64 address, size_t length,
			   u8 mmap_flag, u64 *offset)
{
	struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int err;

	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_flag = mmap_flag;

	err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
					  length);
	if (err) {
		kfree(entry);
		return NULL;
	}
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

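/*
 * Create the user mmap entries for a QP: the SQ/RQ doorbells and the
 * LLQ descriptor space are mapped as I/O, the RQ ring as DMA pages.
 * The resulting mmap keys and in-page offsets are returned to userspace
 * in the create_qp response.
 */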
static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
{
	size_t length;
	u64 address;

	address = dev->db_bar_addr + resp->sq_db_offset;
	qp->sq_db_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address,
					   PAGE_SIZE, EFA_MMAP_IO_NC,
					   &resp->sq_db_mmap_key);
	if (!qp->sq_db_mmap_entry)
		return -ENOMEM;

	resp->sq_db_offset &= ~PAGE_MASK;

	address = dev->mem_bar_addr + resp->llq_desc_offset;
	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
			    offset_in_page(resp->llq_desc_offset));

	qp->llq_desc_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address, length,
					   EFA_MMAP_IO_WC,
					   &resp->llq_desc_mmap_key);
	if (!qp->llq_desc_mmap_entry)
		goto err_remove_mmap;

	resp->llq_desc_offset &= ~PAGE_MASK;

	if (qp->rq_size) {
		address = dev->db_bar_addr + resp->rq_db_offset;

		qp->rq_db_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, PAGE_SIZE,
						   EFA_MMAP_IO_NC,
						   &resp->rq_db_mmap_key);
		if (!qp->rq_db_mmap_entry)
			goto err_remove_mmap;

		resp->rq_db_offset &= ~PAGE_MASK;

		address = virt_to_phys(qp->rq_cpu_addr);
		qp->rq_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, qp->rq_size,
						   EFA_MMAP_DMA_PAGE,
						   &resp->rq_mmap_key);
		if (!qp->rq_mmap_entry)
			goto err_remove_mmap;

		resp->rq_mmap_size = qp->rq_size;
	}

	return 0;

err_remove_mmap:
	efa_qp_user_mmap_entries_remove(qp);

	return -ENOMEM;
}

static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested send wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_wr,
			  dev->dev_attr.max_sq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_wr,
			  dev->dev_attr.max_rq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge send[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested inline data[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_inline_data,
			  dev->dev_attr.inline_buf_size);
		return -EINVAL;
	}

	return 0;
}

static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
{
	if (init_attr->qp_type != IB_QPT_DRIVER &&
	    init_attr->qp_type != IB_QPT_UD) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d\n", init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->srq) {
		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct efa_com_create_qp_params create_qp_params = {};
	struct efa_com_create_qp_result create_qp_resp;
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_ibv_create_qp_resp resp = {};
	struct efa_ibv_create_qp cmd = {};
	struct efa_qp *qp = to_eqp(ibqp);
	struct efa_ucontext *ucontext;
	u16 supported_efa_flags = 0;
	int err;

	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
					     ibucontext);

	err = efa_qp_validate_cap(dev, init_attr);
	if (err)
		goto err_out;

	err = efa_qp_validate_attr(dev, init_attr);
	if (err)
		goto err_out;

	if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for create_qp\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_90)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
		supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;

	if (cmd.flags & ~supported_efa_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
			  cmd.flags, supported_efa_flags);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	create_qp_params.uarn = ucontext->uarn;
	create_qp_params.pd = to_epd(ibqp->pd)->pdn;

	if (init_attr->qp_type == IB_QPT_UD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
	} else {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d driver qp type %d\n",
			  init_attr->qp_type, cmd.driver_qp_type);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
		  init_attr->qp_type, cmd.driver_qp_type);
	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;

	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
	if (qp->rq_size) {
		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
						    qp->rq_size, DMA_TO_DEVICE);
		if (!qp->rq_cpu_addr) {
			err = -ENOMEM;
			goto err_out;
		}

		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		create_qp_params.rq_base_addr = qp->rq_dma_addr;
	}

	if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
		create_qp_params.unsolicited_write_recv = true;

	err = efa_com_create_qp(&dev->edev, &create_qp_params,
				&create_qp_resp);
	if (err)
		goto err_free_mapped;

	resp.sq_db_offset = create_qp_resp.sq_db_offset;
	resp.rq_db_offset = create_qp_resp.rq_db_offset;
	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;

	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
				    &resp);
	if (err)
		goto err_destroy_qp;

	qp->qp_handle = create_qp_resp.qp_handle;
	qp->ibqp.qp_num = create_qp_resp.qp_num;
	qp->max_send_wr = init_attr->cap.max_send_wr;
	qp->max_recv_wr = init_attr->cap.max_recv_wr;
	qp->max_send_sge = init_attr->cap.max_send_sge;
	qp->max_recv_sge = init_attr->cap.max_recv_sge;
	qp->max_inline_data = init_attr->cap.max_inline_data;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for qp[%u]\n",
				  create_qp_resp.qp_num);
			goto err_remove_mmap_entries;
		}
	}

	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);

	return 0;

err_remove_mmap_entries:
	efa_qp_user_mmap_entries_remove(qp);
err_destroy_qp:
	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
	if (qp->rq_size)
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
err_out:
	atomic64_inc(&dev->stats.create_qp_err);
	return err;
}

static const struct {
	int valid;
	enum ib_qp_attr_mask req_param;
	enum ib_qp_attr_mask opt_param;
} srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.req_param = IB_QP_PKEY_INDEX |
				     IB_QP_PORT |
				     IB_QP_QKEY,
		},
	},
	[IB_QPS_INIT] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_PORT |
				     IB_QP_QKEY,
		},
		[IB_QPS_RTR] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_QKEY,
		},
	},
	[IB_QPS_RTR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.req_param = IB_QP_SQ_PSN,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY |
				     IB_QP_RNR_RETRY,

		}
	},
	[IB_QPS_RTS] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
		},
	},
	[IB_QPS_SQD] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_QKEY,
		}
	},
	[IB_QPS_SQE] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
	}
};

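/*
 * Check a modify_qp request on an SRD QP against srd_qp_state_table:
 * the requested transition must be valid, all required attributes must
 * be present, and no attribute outside the required/optional set may be
 * given. This is the SRD counterpart of ib_modify_qp_is_ok(), which is
 * used for UD QPs.
 */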
static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
				    enum ib_qp_state next_state,
				    enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return false;

	if (!srd_qp_state_table[cur_state][next_state].valid)
		return false;

	req_param = srd_qp_state_table[cur_state][next_state].req_param;
	opt_param = srd_qp_state_table[cur_state][next_state].opt_param;

	if ((mask & req_param) != req_param)
		return false;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return false;

	return true;
}

static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
{
	int err;

#define EFA_MODIFY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
	 IB_QP_RNR_RETRY)

	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	if (qp->ibqp.qp_type == IB_QPT_DRIVER)
		err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
					       qp_attr_mask);
	else
		err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
					  qp_attr_mask);

	if (err) {
		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
		return -EINVAL;
	}

	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
		return -EOPNOTSUPP;
	}

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_modify_qp_params params = {};
	struct efa_qp *qp = to_eqp(ibqp);
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;
	int err;

	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
						     qp->state;
	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;

	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
				     new_state);
	if (err)
		return err;

	params.qp_handle = qp->qp_handle;

	if (qp_attr_mask & IB_QP_STATE) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
			1);
		EFA_SET(&params.modify_mask,
			EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
		params.cur_qp_state = cur_state;
		params.qp_state = new_state;
	}

	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		EFA_SET(&params.modify_mask,
			EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
		params.qkey = qp_attr->qkey;
	}

	if (qp_attr_mask & IB_QP_SQ_PSN) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
		params.sq_psn = qp_attr->sq_psn;
	}

	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
			1);
		params.rnr_retry = qp_attr->rnr_retry;
	}

	err = efa_com_modify_qp(&dev->edev, &params);
	if (err)
		return err;

	qp->state = new_state;

	return 0;
}

static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
{
	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };

	return efa_com_destroy_cq(&dev->edev, &params);
}

static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
{
	rdma_user_mmap_entry_remove(cq->db_mmap_entry);
	rdma_user_mmap_entry_remove(cq->mmap_entry);
}

int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibcq->device);
	struct efa_cq *cq = to_ecq(ibcq);

	ibdev_dbg(&dev->ibdev,
		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);

	efa_destroy_cq_idx(dev, cq->cq_idx);
	efa_cq_user_mmap_entries_remove(cq);
	if (cq->eq) {
		xa_erase(&dev->cqs_xa, cq->cq_idx);
		synchronize_irq(cq->eq->irq.irqn);
	}
	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
			DMA_FROM_DEVICE);
	return 0;
}

static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
{
	return &dev->eqs[vec];
}

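/*
 * Expose the CQ ring to userspace as DMA pages and, when the device
 * reports a valid doorbell offset (db_valid), the CQ doorbell as a
 * non-cached I/O mapping. The mmap keys are returned in the create_cq
 * response.
 */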
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp,
				 bool db_valid)
{
	resp->q_mmap_size = cq->size;
	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						    virt_to_phys(cq->cpu_addr),
						    cq->size, EFA_MMAP_DMA_PAGE,
						    &resp->q_mmap_key);
	if (!cq->mmap_entry)
		return -ENOMEM;

	if (db_valid) {
		cq->db_mmap_entry =
			efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						   dev->db_bar_addr + resp->db_off,
						   PAGE_SIZE, EFA_MMAP_IO_NC,
						   &resp->db_mmap_key);
		if (!cq->db_mmap_entry) {
			rdma_user_mmap_entry_remove(cq->mmap_entry);
			return -ENOMEM;
		}

		resp->db_off &= ~PAGE_MASK;
		resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
	}

	return 0;
}

int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct efa_ucontext, ibucontext);
	struct efa_com_create_cq_params params = {};
	struct efa_ibv_create_cq_resp resp = {};
	struct efa_com_create_cq_result result;
	struct ib_device *ibdev = ibcq->device;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_ibv_create_cq cmd = {};
	struct efa_cq *cq = to_ecq(ibcq);
	int entries = attr->cqe;
	bool set_src_addr;
	int err;

	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);

	if (attr->flags)
		return -EOPNOTSUPP;

	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
		ibdev_dbg(ibdev,
			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
			  entries, dev->dev_attr.max_cq_depth);
		err = -EINVAL;
		goto err_out;
	}

	if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
	if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
	    (set_src_addr ||
	     cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
		ibdev_dbg(ibdev,
			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
		err = -EINVAL;
		goto err_out;
	}

	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
		ibdev_dbg(ibdev,
			  "Invalid number of sub cqs[%u] expected[%u]\n",
			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
		err = -EINVAL;
		goto err_out;
	}

	cq->ucontext = ucontext;
	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
					 DMA_FROM_DEVICE);
	if (!cq->cpu_addr) {
		err = -ENOMEM;
		goto err_out;
	}

	params.uarn = cq->ucontext->uarn;
	params.cq_depth = entries;
	params.dma_addr = cq->dma_addr;
	params.entry_size_in_bytes = cmd.cq_entry_size;
	params.num_sub_cqs = cmd.num_sub_cqs;
	params.set_src_addr = set_src_addr;
	if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
		cq->eq = efa_vec2eq(dev, attr->comp_vector);
		params.eqn = cq->eq->eeq.eqn;
		params.interrupt_mode_enabled = true;
	}

	err = efa_com_create_cq(&dev->edev, &params, &result);
	if (err)
		goto err_free_mapped;

	resp.db_off = result.db_off;
	resp.cq_idx = result.cq_idx;
	cq->cq_idx = result.cq_idx;
	cq->ibcq.cqe = result.actual_depth;
	WARN_ON_ONCE(entries != result.actual_depth);

	err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
	if (err) {
		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
			  cq->cq_idx);
		goto err_destroy_cq;
	}

	if (cq->eq) {
		err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
		if (err) {
			ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
				  cq->cq_idx);
			goto err_remove_mmap;
		}
	}

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for create_cq\n");
			goto err_xa_erase;
		}
	}

	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);

	return 0;

err_xa_erase:
	if (cq->eq)
		xa_erase(&dev->cqs_xa, cq->cq_idx);
err_remove_mmap:
	efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
	efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
			DMA_FROM_DEVICE);

err_out:
	atomic64_inc(&dev->stats.create_cq_err);
	return err;
}

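/*
 * Fill page_list with the DMA address of each hp_shift-sized block of
 * the umem; these blocks are what the device treats as the MR's pages.
 */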
static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
{
	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
	struct ib_block_iter biter;
	unsigned int hp_idx = 0;

	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
		  hp_cnt, pages_in_hp);

	rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE / sizeof(*buf);
	}
	return sglist;

err:
	kfree(sglist);
	return NULL;
}

/*
 * create a chunk list of physical pages dma addresses from the supplied
 * scatter gather list
 */
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
	unsigned int chunk_list_size, chunk_idx, payload_idx;
	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
	struct efa_com_ctrl_buff_info *ctrl_buf;
	u64 *cur_chunk_buf, *prev_chunk_buf;
	struct ib_block_iter biter;
	dma_addr_t dma_addr;
	int i;

	/* allocate a chunk list that consists of 4KB chunks */
	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);

	chunk_list->size = chunk_list_size;
	chunk_list->chunks = kcalloc(chunk_list_size,
				     sizeof(*chunk_list->chunks),
				     GFP_KERNEL);
	if (!chunk_list->chunks)
		return -ENOMEM;

	ibdev_dbg(&dev->ibdev,
		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
		  page_cnt);

	/* allocate chunk buffers: */
	for (i = 0; i < chunk_list_size; i++) {
		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
		if (!chunk_list->chunks[i].buf)
			goto chunk_list_dealloc;

		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
	}
	chunk_list->chunks[chunk_list_size - 1].length =
		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
			EFA_CHUNK_PTR_SIZE;

	/* fill the dma addresses of sg list pages to chunks: */
	chunk_idx = 0;
	payload_idx = 0;
	cur_chunk_buf = chunk_list->chunks[0].buf;
	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
			    EFA_CHUNK_PAYLOAD_SIZE) {
		cur_chunk_buf[payload_idx++] =
			rdma_block_iter_dma_address(&biter);

		if (payload_idx == EFA_PTRS_PER_CHUNK) {
			chunk_idx++;
			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
			payload_idx = 0;
		}
	}

	/* map chunks to dma and fill chunks next ptrs */
	for (i = chunk_list_size - 1; i >= 0; i--) {
		dma_addr = dma_map_single(&dev->pdev->dev,
					  chunk_list->chunks[i].buf,
					  chunk_list->chunks[i].length,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
			ibdev_err(&dev->ibdev,
				  "chunk[%u] dma_map_failed\n", i);
			goto chunk_list_unmap;
		}

		chunk_list->chunks[i].dma_addr = dma_addr;
		ibdev_dbg(&dev->ibdev,
			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);

		if (!i)
			break;

		prev_chunk_buf = chunk_list->chunks[i - 1].buf;

		ctrl_buf = (struct efa_com_ctrl_buff_info *)
				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
		ctrl_buf->length = chunk_list->chunks[i].length;

		efa_com_set_dma_addr(dma_addr,
				     &ctrl_buf->address.mem_addr_high,
				     &ctrl_buf->address.mem_addr_low);
	}

	return 0;

chunk_list_unmap:
	for (; i < chunk_list_size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
	}
chunk_list_dealloc:
	for (i = 0; i < chunk_list_size; i++)
		kfree(chunk_list->chunks[i].buf);

	kfree(chunk_list->chunks);
	return -ENOMEM;
}

static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int i;

	for (i = 0; i < chunk_list->size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
		kfree(chunk_list->chunks[i].buf);
	}

	kfree(chunk_list->chunks);
}

/* initialize pbl continuous mode: map pbl buffer to a dma address. */
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
		return -ENOMEM;
	}

	pbl->phys.continuous.dma_addr = dma_addr;
	ibdev_dbg(&dev->ibdev,
		  "pbl continuous - dma_addr = %pad, size[%u]\n",
		  &dma_addr, pbl->pbl_buf_size_in_bytes);

	return 0;
}

/*
 * initialize pbl indirect mode:
 * create a chunk list out of the dma addresses of the physical pages of
 * pbl buffer.
 */
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
	struct scatterlist *sgl;
	int sg_dma_cnt, err;

	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
	if (!sgl)
		return -ENOMEM;

	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
	if (!sg_dma_cnt) {
		err = -EINVAL;
		goto err_map;
	}

	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
	pbl->phys.indirect.sgl = sgl;
	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
	err = pbl_chunk_list_create(dev, pbl);
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "chunk_list creation failed[%d]\n", err);
		goto err_chunk;
	}

	ibdev_dbg(&dev->ibdev,
		  "pbl indirect - size[%u], chunks[%u]\n",
		  pbl->pbl_buf_size_in_bytes,
		  pbl->phys.indirect.chunk_list.size);

	return 0;

err_chunk:
	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
err_map:
	kfree(sgl);
	return err;
}

static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
{
	pbl_chunk_list_destroy(dev, pbl);
	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
	kfree(pbl->phys.indirect.sgl);
}

/* create a page buffer list from a mapped user memory region */
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
{
	int err;

	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
	if (!pbl->pbl_buf)
		return -ENOMEM;

	if (is_vmalloc_addr(pbl->pbl_buf)) {
		pbl->physically_continuous = 0;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_indirect_initialize(dev, pbl);
		if (err)
			goto err_free;
	} else {
		pbl->physically_continuous = 1;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_continuous_initialize(dev, pbl);
		if (err)
			goto err_free;
	}

	ibdev_dbg(&dev->ibdev,
		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
		  hp_cnt, pbl->physically_continuous);

	return 0;

err_free:
	kvfree(pbl->pbl_buf);
	return err;
}

static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	if (pbl->physically_continuous)
		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	else
		pbl_indirect_terminate(dev, pbl);

	kvfree(pbl->pbl_buf);
}

static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
{
	int err;

	params->inline_pbl = 1;
	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
				params->page_num, params->page_shift);
	if (err)
		return err;

	ibdev_dbg(&dev->ibdev,
		  "inline_pbl_array - pages[%u]\n", params->page_num);

	return 0;
}

static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
{
	int err;

	err = pbl_create(dev, pbl, mr->umem, params->page_num,
			 params->page_shift);
	if (err) {
		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
		return err;
	}

	params->inline_pbl = 0;
	params->indirect = !pbl->physically_continuous;
	if (pbl->physically_continuous) {
		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;

		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	} else {
		params->pbl.pbl.length =
			pbl->phys.indirect.chunk_list.chunks[0].length;

		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	}

	return 0;
}

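/*
 * Common MR allocation path: validate udata and the requested access
 * flags against the device capabilities (remote read/write only when
 * the corresponding capability is reported), then allocate the efa_mr.
 */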
static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
				   struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	int supp_access_flags;
	struct efa_mr *mr;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return ERR_PTR(-EINVAL);
	}

	supp_access_flags =
		IB_ACCESS_LOCAL_WRITE |
		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0) |
		(EFA_DEV_CAP(dev, RDMA_WRITE) ? IB_ACCESS_REMOTE_WRITE : 0);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~supp_access_flags) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported access flags[%#x], supported[%#x]\n",
			  access_flags, supp_access_flags);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	return mr;
}

static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
			   u64 length, u64 virt_addr, int access_flags)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_com_reg_mr_params params = {};
	struct efa_com_reg_mr_result result = {};
	struct pbl_context pbl;
	unsigned int pg_sz;
	int inline_size;
	int err;

	params.pd = to_epd(ibpd)->pdn;
	params.iova = virt_addr;
	params.mr_length_in_bytes = length;
	params.permissions = access_flags;

	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->dev_attr.page_size_cap,
				       virt_addr);
	if (!pg_sz) {
		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
			  dev->dev_attr.page_size_cap);
		return -EOPNOTSUPP;
	}

	params.page_shift = order_base_2(pg_sz);
	params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);

	ibdev_dbg(&dev->ibdev,
		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
		  start, length, params.page_shift, params.page_num);

	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
	if (params.page_num <= inline_size) {
		err = efa_create_inline_pbl(dev, mr, &params);
		if (err)
			return err;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		if (err)
			return err;
	} else {
		err = efa_create_pbl(dev, &pbl, mr, &params);
		if (err)
			return err;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		pbl_destroy(dev, &pbl);

		if (err)
			return err;
	}

	mr->ibmr.lkey = result.l_key;
	mr->ibmr.rkey = result.r_key;
	mr->ibmr.length = length;
	mr->ic_info.recv_ic_id = result.ic_info.recv_ic_id;
	mr->ic_info.rdma_read_ic_id = result.ic_info.rdma_read_ic_id;
	mr->ic_info.rdma_recv_ic_id = result.ic_info.rdma_recv_ic_id;
	mr->ic_info.recv_ic_id_valid = result.ic_info.recv_ic_id_valid;
	mr->ic_info.rdma_read_ic_id_valid = result.ic_info.rdma_read_ic_id_valid;
	mr->ic_info.rdma_recv_ic_id_valid = result.ic_info.rdma_recv_ic_id_valid;
	ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);

	return 0;
}

struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
				     u64 length, u64 virt_addr,
				     int fd, int access_flags,
				     struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct ib_umem_dmabuf *umem_dmabuf;
	struct efa_mr *mr;
	int err;

	mr = efa_alloc_mr(ibpd, access_flags, udata);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto err_out;
	}

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
						access_flags);
	if (IS_ERR(umem_dmabuf)) {
		err = PTR_ERR(umem_dmabuf);
		ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
		goto err_free;
	}

	mr->umem = &umem_dmabuf->umem;
	err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
	if (err)
		goto err_release;

	return &mr->ibmr;

err_release:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.reg_mr_err);
	return ERR_PTR(err);
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_mr *mr;
	int err;

	mr = efa_alloc_mr(ibpd, access_flags, udata);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto err_out;
	}

	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(&dev->ibdev,
			  "Failed to pin and map user space memory[%d]\n", err);
		goto err_free;
	}

	err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
	if (err)
		goto err_release;

	return &mr->ibmr;

err_release:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.reg_mr_err);
	return ERR_PTR(err);
}

static int UVERBS_HANDLER(EFA_IB_METHOD_MR_QUERY)(struct uverbs_attr_bundle *attrs)
{
	struct ib_mr *ibmr = uverbs_attr_get_obj(attrs, EFA_IB_ATTR_QUERY_MR_HANDLE);
	struct efa_mr *mr = to_emr(ibmr);
	u16 ic_id_validity = 0;
	int ret;

	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
			     &mr->ic_info.recv_ic_id, sizeof(mr->ic_info.recv_ic_id));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
			     &mr->ic_info.rdma_read_ic_id, sizeof(mr->ic_info.rdma_read_ic_id));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
			     &mr->ic_info.rdma_recv_ic_id, sizeof(mr->ic_info.rdma_recv_ic_id));
	if (ret)
		return ret;

	if (mr->ic_info.recv_ic_id_valid)
		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RECV_IC_ID;
	if (mr->ic_info.rdma_read_ic_id_valid)
		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID;
	if (mr->ic_info.rdma_recv_ic_id_valid)
		ic_id_validity |= EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID;

	return uverbs_copy_to(attrs, EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
			      &ic_id_validity, sizeof(ic_id_validity));
}

int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibmr->device);
	struct efa_com_dereg_mr_params params;
	struct efa_mr *mr = to_emr(ibmr);
	int err;

	ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);

	params.l_key = mr->ibmr.lkey;
	err = efa_com_dereg_mr(&dev->edev, &params);
	if (err)
		return err;

	ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err) {
		ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
		return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
{
	struct efa_com_dealloc_uar_params params = {
		.uarn = uarn,
	};

	return efa_com_dealloc_uar(&dev->edev, &params);
}

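/*
 * Evaluates truthy (and sets _attr_str to the attribute name) when the
 * device exposes _attr but userspace did not acknowledge it via _mask
 * in its comp_mask; otherwise _attr_str is set to NULL.
 */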
1879 */ 1880 1881 err = ib_copy_from_udata(&cmd, udata, 1882 min(sizeof(cmd), udata->inlen)); 1883 if (err) { 1884 ibdev_dbg(&dev->ibdev, 1885 "Cannot copy udata for alloc_ucontext\n"); 1886 goto err_out; 1887 } 1888 1889 err = efa_user_comp_handshake(ibucontext, &cmd); 1890 if (err) 1891 goto err_out; 1892 1893 err = efa_com_alloc_uar(&dev->edev, &result); 1894 if (err) 1895 goto err_out; 1896 1897 ucontext->uarn = result.uarn; 1898 1899 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE; 1900 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH; 1901 resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq; 1902 resp.inline_buf_size = dev->dev_attr.inline_buf_size; 1903 resp.max_llq_size = dev->dev_attr.max_llq_size; 1904 resp.max_tx_batch = dev->dev_attr.max_tx_batch; 1905 resp.min_sq_wr = dev->dev_attr.min_sq_depth; 1906 1907 err = ib_copy_to_udata(udata, &resp, 1908 min(sizeof(resp), udata->outlen)); 1909 if (err) 1910 goto err_dealloc_uar; 1911 1912 return 0; 1913 1914 err_dealloc_uar: 1915 efa_dealloc_uar(dev, result.uarn); 1916 err_out: 1917 atomic64_inc(&dev->stats.alloc_ucontext_err); 1918 return err; 1919 } 1920 1921 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext) 1922 { 1923 struct efa_ucontext *ucontext = to_eucontext(ibucontext); 1924 struct efa_dev *dev = to_edev(ibucontext->device); 1925 1926 efa_dealloc_uar(dev, ucontext->uarn); 1927 } 1928 1929 void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry) 1930 { 1931 struct efa_user_mmap_entry *entry = to_emmap(rdma_entry); 1932 1933 kfree(entry); 1934 } 1935 1936 static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext, 1937 struct vm_area_struct *vma) 1938 { 1939 struct rdma_user_mmap_entry *rdma_entry; 1940 struct efa_user_mmap_entry *entry; 1941 unsigned long va; 1942 int err = 0; 1943 u64 pfn; 1944 1945 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma); 1946 if (!rdma_entry) { 1947 ibdev_dbg(&dev->ibdev, 1948 "pgoff[%#lx] does not have valid entry\n", 1949 vma->vm_pgoff); 1950 atomic64_inc(&dev->stats.mmap_err); 1951 return -EINVAL; 1952 } 1953 entry = to_emmap(rdma_entry); 1954 1955 ibdev_dbg(&dev->ibdev, 1956 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n", 1957 entry->address, rdma_entry->npages * PAGE_SIZE, 1958 entry->mmap_flag); 1959 1960 pfn = entry->address >> PAGE_SHIFT; 1961 switch (entry->mmap_flag) { 1962 case EFA_MMAP_IO_NC: 1963 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, 1964 entry->rdma_entry.npages * PAGE_SIZE, 1965 pgprot_noncached(vma->vm_page_prot), 1966 rdma_entry); 1967 break; 1968 case EFA_MMAP_IO_WC: 1969 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, 1970 entry->rdma_entry.npages * PAGE_SIZE, 1971 pgprot_writecombine(vma->vm_page_prot), 1972 rdma_entry); 1973 break; 1974 case EFA_MMAP_DMA_PAGE: 1975 for (va = vma->vm_start; va < vma->vm_end; 1976 va += PAGE_SIZE, pfn++) { 1977 err = vm_insert_page(vma, va, pfn_to_page(pfn)); 1978 if (err) 1979 break; 1980 } 1981 break; 1982 default: 1983 err = -EINVAL; 1984 } 1985 1986 if (err) { 1987 ibdev_dbg( 1988 &dev->ibdev, 1989 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n", 1990 entry->address, rdma_entry->npages * PAGE_SIZE, 1991 entry->mmap_flag, err); 1992 atomic64_inc(&dev->stats.mmap_err); 1993 } 1994 1995 rdma_user_mmap_entry_put(rdma_entry); 1996 return err; 1997 } 1998 1999 int efa_mmap(struct ib_ucontext *ibucontext, 2000 struct vm_area_struct *vma) 2001 { 2002 struct efa_ucontext *ucontext = to_eucontext(ibucontext); 2003 struct 
	struct efa_dev *dev = to_edev(ibucontext->device);
	size_t length = vma->vm_end - vma->vm_start;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	return __efa_mmap(dev, ucontext, vma);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.create_ah_err);
	return err;
}

int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return -EOPNOTSUPP;
	}

	efa_ah_destroy(dev, ah);
	return 0;
}

struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
					      u32 port_num)
{
	return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
					  ARRAY_SIZE(efa_port_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
{
	return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
					  ARRAY_SIZE(efa_device_stats_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int efa_fill_device_stats(struct efa_dev *dev,
				 struct rdma_hw_stats *stats)
{
	struct efa_com_stats_admin *as = &dev->edev.aq.stats;
	struct efa_stats *s = &dev->stats;

	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
		atomic64_read(&s->alloc_ucontext_err);
	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);

	return ARRAY_SIZE(efa_device_stats_descs);
}

static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
			       u32 port_num)
{
	struct efa_com_get_stats_params params = {};
	union efa_com_get_stats_result result;
	struct efa_com_rdma_write_stats *rws;
	struct efa_com_rdma_read_stats *rrs;
	struct efa_com_messages_stats *ms;
	struct efa_com_basic_stats *bs;
	int err;

	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;

	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	bs = &result.basic_stats;
	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
	stats->value[EFA_RX_DROPS] = bs->rx_drops;

	params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	ms = &result.messages_stats;
	stats->value[EFA_SEND_BYTES] = ms->send_bytes;
	stats->value[EFA_SEND_WRS] = ms->send_wrs;
	stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
	stats->value[EFA_RECV_WRS] = ms->recv_wrs;

	params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	rrs = &result.rdma_read_stats;
	stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
	stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
	stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
	stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;

	/* RDMA write stats are only queried on devices advertising the capability */
	if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
		params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
		err = efa_com_get_stats(&dev->edev, &params, &result);
		if (err)
			return err;

		rws = &result.rdma_write_stats;
		stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
		stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
		stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
		stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
	}

	return ARRAY_SIZE(efa_port_stats_descs);
}

int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u32 port_num, int index)
{
	/* port_num 0 selects the device-wide counters */
	if (port_num)
		return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
	else
		return efa_fill_device_stats(to_edev(ibdev), stats);
}

enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u32 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}

DECLARE_UVERBS_NAMED_METHOD(EFA_IB_METHOD_MR_QUERY,
			    UVERBS_ATTR_IDR(EFA_IB_ATTR_QUERY_MR_HANDLE,
					    UVERBS_OBJECT_MR,
					    UVERBS_ACCESS_READ,
					    UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
						UVERBS_ATTR_TYPE(u16),
						UA_MANDATORY));

ADD_UVERBS_METHODS(efa_mr,
		   UVERBS_OBJECT_MR,
		   &UVERBS_METHOD(EFA_IB_METHOD_MR_QUERY));

const struct uapi_definition efa_uapi_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_MR,
				&efa_mr),
	{},
};