// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
			 u32 port)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count--;
	WARN_ON(pd->vport_use_count < 0);

	if (!pd->vport_use_count)
		mana_uncfg_vport(mpc);

	mutex_unlock(&pd->vport_mutex);
}

int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
		      u32 doorbell_id)
{
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	ndev = mana_ib_get_netdev(&dev->ib_dev, port);
	mpc = netdev_priv(ndev);

	mutex_lock(&pd->vport_mutex);

	pd->vport_use_count++;
	if (pd->vport_use_count > 1) {
		ibdev_dbg(&dev->ib_dev,
			  "Skip as this PD is already configured for a vPort\n");
		mutex_unlock(&pd->vport_mutex);
		return 0;
	}

	err = mana_cfg_vport(mpc, pd->pdn, doorbell_id);
	if (err) {
		pd->vport_use_count--;
		mutex_unlock(&pd->vport_mutex);

		ibdev_dbg(&dev->ib_dev, "Failed to configure vPort, err %d\n", err);
		return err;
	}

	mutex_unlock(&pd->vport_mutex);

	pd->tx_shortform_allowed = mpc->tx_shortform_allowed;
	pd->tx_vp_offset = mpc->tx_vp_offset;

	ibdev_dbg(&dev->ib_dev, "vport handle %llx pdid %x doorbell_id %x\n",
		  mpc->port_handle, pd->pdn, doorbell_id);

	return 0;
}

int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_create_pd_resp resp = {};
	struct gdma_create_pd_req req = {};
	enum gdma_pd_flags flags = 0;
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
			     sizeof(resp));

	req.flags = flags;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to get pd_id err %d status %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	pd->pd_handle = resp.pd_handle;
	pd->pdn = resp.pd_id;
	ibdev_dbg(&dev->ib_dev, "pd_handle 0x%llx pd_id %d\n",
		  pd->pd_handle, pd->pdn);

	mutex_init(&pd->vport_mutex);
	pd->vport_use_count = 0;
	return 0;
}

int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct ib_device *ibdev = ibpd->device;
	struct gdma_destory_pd_resp resp = {};
	struct gdma_destroy_pd_req req = {};
	struct mana_ib_dev *dev;
	struct gdma_context *gc;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(dev);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
			     sizeof(resp));

	req.pd_handle = pd->pd_handle;
	err = mana_gd_send_request(gc, sizeof(req), &req,
				   sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to destroy pd_handle 0x%llx err %d status %u\n",
			  pd->pd_handle, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	return err;
}

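/*
 * Doorbell pages are handed out as a GDMA resource range of type
 * GDMA_RESOURCE_DOORBELL_PAGE, one page index per user context. The
 * index is later used by mana_ib_mmap() to map the page to userspace.
 */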
static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
					 int doorbell_page)
{
	struct gdma_destroy_resource_range_req req = {};
	struct gdma_resp_hdr resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.allocated_resources = doorbell_page;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.status) {
		dev_err(gc->dev,
			"Failed to destroy doorbell page: ret %d, 0x%x\n",
			err, resp.status);
		return err ?: -EPROTO;
	}

	return 0;
}

static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
					  int *doorbell_page)
{
	struct gdma_allocate_resource_range_req req = {};
	struct gdma_allocate_resource_range_resp resp = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
			     sizeof(req), sizeof(resp));

	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
	req.num_resources = 1;
	req.alignment = 1;

	/* Have GDMA start searching from 0 */
	req.allocated_resources = 0;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev,
			"Failed to allocate doorbell page: ret %d, 0x%x\n",
			err, resp.hdr.status);
		return err ?: -EPROTO;
	}

	*doorbell_page = resp.allocated_resources;

	return 0;
}

int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
			   struct ib_udata *udata)
{
	struct mana_ib_ucontext *ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int doorbell_page;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	/* Allocate a doorbell page index */
	ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
	if (ret) {
		ibdev_dbg(ibdev, "Failed to allocate doorbell page %d\n", ret);
		return ret;
	}

	ibdev_dbg(ibdev, "Doorbell page allocated %d\n", doorbell_page);

	ucontext->doorbell = doorbell_page;

	return 0;
}

void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
	if (ret)
		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}

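/*
 * Pin the user buffer at @addr and register it with the HW as a
 * zero-offset DMA region. On success the queue owns the umem and the
 * gdma_region; both are released by mana_ib_destroy_queue(). A typical
 * caller looks like this (sketch only; the ucmd fields are illustrative):
 *
 *	struct mana_ib_queue queue;
 *	int err;
 *
 *	err = mana_ib_create_queue(mdev, ucmd.buf_addr, ucmd.buf_size,
 *				   &queue);
 *	if (err)
 *		return err;
 *	...
 *	mana_ib_destroy_queue(mdev, &queue);
 */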
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue)
{
	struct ib_umem *umem;
	int err;

	queue->umem = NULL;
	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;

	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
		return err;
	}

	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
		goto free_umem;
	}
	queue->umem = umem;

	ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);

	return 0;
free_umem:
	ib_umem_release(umem);
	return err;
}

void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
	/* Ignore the return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
	ib_umem_release(queue->umem);
}

static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
			    struct gdma_context *gc,
			    struct gdma_create_dma_region_req *create_req,
			    size_t num_pages, mana_handle_t *gdma_region,
			    u32 expected_status)
{
	struct gdma_create_dma_region_resp create_resp = {};
	unsigned int create_req_msg_size;
	int err;

	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages);
	create_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
				   sizeof(create_resp), &create_resp);
	if (err || create_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to create DMA region: %d, 0x%x\n",
			  err, create_resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	*gdma_region = create_resp.dma_region_handle;
	ibdev_dbg(&dev->ib_dev, "Created DMA region handle 0x%llx\n",
		  *gdma_region);

	return 0;
}

static int
mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
			  struct gdma_dma_region_add_pages_req *add_req,
			  unsigned int num_pages, u32 expected_status)
{
	unsigned int add_req_msg_size =
		struct_size(add_req, page_addr_list, num_pages);
	struct gdma_general_resp add_resp = {};
	int err;

	mana_gd_init_req_hdr(&add_req->hdr, GDMA_DMA_REGION_ADD_PAGES,
			     add_req_msg_size, sizeof(add_resp));
	add_req->page_addr_list_len = num_pages;

	err = mana_gd_send_request(gc, add_req_msg_size, add_req,
				   sizeof(add_resp), &add_resp);
	if (err || add_resp.hdr.status != expected_status) {
		ibdev_dbg(&dev->ib_dev,
			  "Failed to add pages to DMA region: %d, 0x%x\n",
			  err, add_resp.hdr.status);

		if (!err)
			err = -EPROTO;

		return err;
	}

	return 0;
}

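/*
 * Creating a DMA region can take more than one HWC message: page
 * addresses that do not fit in the initial GDMA_CREATE_DMA_REGION
 * request are sent with follow-up GDMA_DMA_REGION_ADD_PAGES requests,
 * and every message except the last must complete with
 * GDMA_STATUS_MORE_ENTRIES.
 *
 * Illustrative numbers only: a 16 MB umem mapped at 4 KB granularity
 * is 4096 pages; if an HWC request held 500 page addresses, this would
 * be one create message followed by eight add-pages messages, with
 * only the final one expecting status 0.
 */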
static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					mana_handle_t *gdma_region, unsigned long page_sz)
{
	struct gdma_dma_region_add_pages_req *add_req = NULL;
	size_t num_pages_processed = 0, num_pages_to_handle;
	struct gdma_create_dma_region_req *create_req;
	unsigned int create_req_msg_size;
	struct hw_channel_context *hwc;
	struct ib_block_iter biter;
	size_t max_pgs_add_cmd = 0;
	size_t max_pgs_create_cmd;
	struct gdma_context *gc;
	size_t num_pages_total;
	unsigned int tail = 0;
	u64 *page_addr_list;
	void *request_buf;
	int err;

	gc = mdev_to_gc(dev);
	hwc = gc->hwc.driver_data;

	num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);

	max_pgs_create_cmd =
		(hwc->max_req_msg_size - sizeof(*create_req)) / sizeof(u64);
	num_pages_to_handle =
		min_t(size_t, num_pages_total, max_pgs_create_cmd);
	create_req_msg_size =
		struct_size(create_req, page_addr_list, num_pages_to_handle);

	request_buf = kzalloc(hwc->max_req_msg_size, GFP_KERNEL);
	if (!request_buf)
		return -ENOMEM;

	create_req = request_buf;
	mana_gd_init_req_hdr(&create_req->hdr, GDMA_CREATE_DMA_REGION,
			     create_req_msg_size,
			     sizeof(struct gdma_create_dma_region_resp));

	create_req->length = umem->length;
	create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
	create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
	create_req->page_count = num_pages_total;

	ibdev_dbg(&dev->ib_dev, "size_dma_region %lu num_pages_total %lu\n",
		  umem->length, num_pages_total);

	ibdev_dbg(&dev->ib_dev, "page_sz %lu offset_in_page %u\n",
		  page_sz, create_req->offset_in_page);

	ibdev_dbg(&dev->ib_dev, "num_pages_to_handle %lu, gdma_page_type %u\n",
		  num_pages_to_handle, create_req->gdma_page_type);

	page_addr_list = create_req->page_addr_list;
	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
		u32 expected_status = 0;

		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
		if (tail < num_pages_to_handle)
			continue;

		if (num_pages_processed + num_pages_to_handle <
		    num_pages_total)
			expected_status = GDMA_STATUS_MORE_ENTRIES;

		if (!num_pages_processed) {
			/* First create message */
			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
							  tail, gdma_region,
							  expected_status);
			if (err)
				goto out;

			max_pgs_add_cmd = (hwc->max_req_msg_size -
					   sizeof(*add_req)) / sizeof(u64);

			add_req = request_buf;
			add_req->dma_region_handle = *gdma_region;
			add_req->reserved3 = 0;
			page_addr_list = add_req->page_addr_list;
		} else {
			/* Subsequent add-pages messages */
			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
							expected_status);
			if (err)
				break;
		}

		num_pages_processed += tail;
		tail = 0;

		/* The remaining pages to handle */
		num_pages_to_handle =
			min_t(size_t,
			      num_pages_total - num_pages_processed,
			      max_pgs_add_cmd);
	}

	if (err)
		mana_ib_gd_destroy_dma_region(dev, *gdma_region);

out:
	kfree(request_buf);
	return err;
}

int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
			      mana_handle_t *gdma_region, u64 virt)
{
	unsigned long page_sz;

	page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
					  mana_handle_t *gdma_region)
{
	unsigned long page_sz;

	/* Hardware requires dma region to align to chosen page size */
	page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
	if (!page_sz) {
		ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
		return -EINVAL;
	}

	return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
}

int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
	struct gdma_context *gc = mdev_to_gc(dev);

	ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);

	return mana_gd_destroy_dma_region(gc, gdma_region);
}

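/*
 * Each user context maps exactly one doorbell page, selected by the
 * index saved in mana_ib_alloc_ucontext(). The page lives at
 * phys_db_page_base + doorbell * db_page_size; e.g. with a 4 KB
 * doorbell page size, index 3 maps the page at base + 0x3000.
 */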
int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mana_ib_ucontext *mana_ucontext =
		container_of(ibcontext, struct mana_ib_ucontext, ibucontext);
	struct ib_device *ibdev = ibcontext->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	phys_addr_t pfn;
	pgprot_t prot;
	int ret;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev_to_gc(mdev);

	if (vma->vm_pgoff != 0) {
		ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
		return -EINVAL;
	}

	/* Map to the page indexed by ucontext->doorbell */
	pfn = (gc->phys_db_page_base +
	       gc->db_page_size * mana_ucontext->doorbell) >>
	      PAGE_SHIFT;
	prot = pgprot_writecombine(vma->vm_page_prot);

	ret = rdma_user_mmap_io(ibcontext, vma, pfn, gc->db_page_size, prot,
				NULL);
	if (ret)
		ibdev_dbg(ibdev, "can't rdma_user_mmap_io ret %d\n", ret);
	else
		ibdev_dbg(ibdev, "mapped I/O pfn 0x%llx page_size %u, ret %d\n",
			  pfn, gc->db_page_size, ret);

	return ret;
}

int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
	if (port_num == 1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}

int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			 struct ib_udata *uhw)
{
	struct mana_ib_dev *dev = container_of(ibdev,
			struct mana_ib_dev, ib_dev);

	props->max_qp = dev->adapter_caps.max_qp_count;
	props->max_qp_wr = dev->adapter_caps.max_qp_wr;
	props->max_cq = dev->adapter_caps.max_cq_count;
	props->max_cqe = dev->adapter_caps.max_qp_wr;
	props->max_mr = dev->adapter_caps.max_mr_count;
	props->max_mr_size = MANA_IB_MAX_MR_SIZE;
	props->max_send_sge = dev->adapter_caps.max_send_sge_count;
	props->max_recv_sge = dev->adapter_caps.max_recv_sge_count;

	return 0;
}

int mana_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props)
{
	struct net_device *ndev = mana_ib_get_netdev(ibdev, port);

	if (!ndev)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);

	if (netif_carrier_ok(ndev) && netif_running(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		props->state = IB_PORT_DOWN;
		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_EDR;
	props->pkey_tbl_len = 1;
	if (port == 1)
		props->gid_tbl_len = 16;

	return 0;
}

enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
	if (index != 0)
		return -EINVAL;
	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
		      union ib_gid *gid)
{
	/* This version doesn't return GID properties */
	return 0;
}

void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

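/*
 * Query the adapter limits (QP/CQ/MR counts, SGE counts, queue sizes)
 * from the HW and cache them in dev->adapter_caps; these cached values
 * are what mana_ib_query_device() reports.
 */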
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
{
	struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
	struct mana_ib_query_adapter_caps_resp resp = {};
	struct mana_ib_query_adapter_caps_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_GET_ADAPTER_CAP, sizeof(req),
			     sizeof(resp));
	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
	req.hdr.dev_id = dev->gdma_dev->dev_id;

	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
				   &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&dev->ib_dev,
			  "Failed to query adapter caps err %d\n", err);
		return err;
	}

	caps->max_sq_id = resp.max_sq_id;
	caps->max_rq_id = resp.max_rq_id;
	caps->max_cq_id = resp.max_cq_id;
	caps->max_qp_count = resp.max_qp_count;
	caps->max_cq_count = resp.max_cq_count;
	caps->max_mr_count = resp.max_mr_count;
	caps->max_pd_count = resp.max_pd_count;
	caps->max_inbound_read_limit = resp.max_inbound_read_limit;
	caps->max_outbound_read_limit = resp.max_outbound_read_limit;
	caps->mw_count = resp.mw_count;
	caps->max_srq_count = resp.max_srq_count;
	caps->max_qp_wr = min_t(u32,
				resp.max_requester_sq_size / GDMA_MAX_SQE_SIZE,
				resp.max_requester_rq_size / GDMA_MAX_RQE_SIZE);
	caps->max_inline_data_size = resp.max_inline_data_size;
	caps->max_send_sge_count = resp.max_send_sge_count;
	caps->max_recv_sge_count = resp.max_recv_sge_count;

	return 0;
}

int mana_ib_create_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue_spec spec = {};
	int err, i;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = mdev;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
	spec.eq.msix_index = 0;

	err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
	if (err)
		return err;

	mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
			    GFP_KERNEL);
	if (!mdev->eqs) {
		err = -ENOMEM;
		goto destroy_fatal_eq;
	}

	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
		spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
		err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
		if (err)
			goto destroy_eqs;
	}

	return 0;

destroy_eqs:
	while (i-- > 0)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);
	kfree(mdev->eqs);
destroy_fatal_eq:
	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
	return err;
}

void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	int i;

	mana_gd_destroy_queue(gc, mdev->fatal_err_eq);

	for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
		mana_gd_destroy_queue(gc, mdev->eqs[i]);

	kfree(mdev->eqs);
}

int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_create_adapter_resp resp = {};
	struct mana_rnic_create_adapter_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.notify_eq_id = mdev->fatal_err_eq->id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d\n", err);
		return err;
	}
	mdev->adapter_handle = resp.adapter;

	return 0;
}

int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
{
	struct mana_rnic_destroy_adapter_resp resp = {};
	struct mana_rnic_destroy_adapter_req req = {};
	struct gdma_context *gc;
	int err;

	gc = mdev_to_gc(mdev);
	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d\n", err);
		return err;
	}

	return 0;
}

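/*
 * GID table entries are pushed to the RNIC as raw IPv4/IPv6 addresses
 * via MANA_IB_CONFIG_IP_ADDR. copy_in_reverse() flips the byte order
 * of the GID (which union ib_gid keeps in network byte order) into the
 * layout the HW expects.
 */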
int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d\n", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_ADD;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
	enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
	struct mana_rnic_config_addr_resp resp = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_config_addr_req req = {};
	int err;

	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d\n", ntype);
		return -EINVAL;
	}

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = ADDR_OP_REMOVE;
	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
		return err;
	}

	return 0;
}

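/*
 * The MAC address is programmed the same way as GIDs: byte order
 * reversed via copy_in_reverse() before it is sent to the HW.
 */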
int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
{
	struct mana_rnic_config_mac_addr_resp resp = {};
	struct mana_rnic_config_mac_addr_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.op = op;
	copy_in_reverse(req.mac_addr, mac, ETH_ALEN);

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to config MAC addr err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_create_cq_resp resp = {};
	struct mana_rnic_create_cq_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.gdma_region = cq->queue.gdma_region;
	req.eq_id = mdev->eqs[cq->comp_vector]->id;
	req.doorbell_page = doorbell;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create cq err %d\n", err);
		return err;
	}

	cq->queue.id = resp.cq_id;
	cq->cq_handle = resp.cq_handle;
	/* The GDMA region is now owned by the CQ handle */
	cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	return 0;
}

int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_rnic_destroy_cq_resp resp = {};
	struct mana_rnic_destroy_cq_req req = {};
	int err;

	if (cq->cq_handle == INVALID_MANA_HANDLE)
		return 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.cq_handle = cq->cq_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d\n", err);
		return err;
	}

	return 0;
}