// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct gdma_context *gc;
	u32 req_buf_size;
	int i, err;

	gc = mdev_to_gc(dev);

	req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE);
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = gc->mana.dev_id;

	/* If there is more than one entry in the indirection table, enable RSS */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;

	/* The ind table passed to the hardware must have
	 * MANA_INDIRECT_TABLE_DEF_SIZE entries. Adjust the verb
	 * ind_table to MANA_INDIRECT_TABLE_DEF_SIZE if required
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_DEF_SIZE; i++) {
		req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req->indir_tab[i]);
	}

	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}

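/*
 * An RSS "QP" has no send queue of its own: each receive WQ in the verbs
 * indirection table is registered as a hardware WQ object, and vPort
 * steering is then programmed to spread incoming traffic across them.
 */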
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	unsigned int ind_tbl_size;
	struct net_device *ndev;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	struct mana_eq *eq;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_DEF_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports start with 1, MANA starts with 0 */
	port = ucmd.port;
	ndev = mana_ib_get_netdev(pd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->queue.gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->queue.gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		eq = &mpc->ac->eqs[cq->comp_vector];
		cq_spec.attached_eq = eq->eq->id;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret) {
			/* Do cleanup starting with index i-1 */
			i--;
			goto fail;
		}

		/* The GDMA regions are now owned by the WQ object */
		wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

		wq->queue.id = wq_spec.queue_index;
		cq->queue.id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "rx_object 0x%llx wq id %llu cq id %llu\n",
			  wq->rx_object, wq->queue.id, cq->queue.id);

		resp.entries[i].cqid = cq->queue.id;
		resp.entries[i].wqid = wq->queue.id;

		mana_ind_table[i] = wq->rx_object;

		/* Create CQ table entry */
		ret = mana_ib_install_cq_cb(mdev, cq);
		if (ret)
			goto fail;
	}
	resp.num_entries = i;

	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(mana_ind_table);

	return 0;

fail:
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		ibcq = ibwq->cq;
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		mana_ib_remove_cq_cb(mdev, cq);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(mana_ind_table);

	return ret;
}

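/*
 * A raw-packet QP owns a single send queue bound to the same vPort (port
 * handle) used by the mana Ethernet driver; the user-supplied SQ buffer is
 * handed over to the hardware as a GDMA WQ object.
 */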
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_eq *eq;
	int eq_vec;
	u32 port;
	int err;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	port = ucmd.port;
	ndev = mana_ib_get_netdev(ibpd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create queue for create qp-raw, err %d\n", err);
		goto err_free_vport;
	}

	/* Create a WQ on the same port handle used by the Ethernet */
	wq_spec.gdma_region = qp->raw_sq.gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->queue.gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	eq_vec = send_cq->comp_vector;
	eq = &mpc->ac->eqs[eq_vec];
	cq_spec.attached_eq = eq->eq->id;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->qp_handle);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_queue;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	qp->raw_sq.id = wq_spec.queue_index;
	send_cq->queue.id = cq_spec.queue_index;

	/* Create CQ table entry */
	err = mana_ib_install_cq_cb(mdev, send_cq);
	if (err)
		goto err_destroy_wq_obj;

	ibdev_dbg(&mdev->ib_dev,
		  "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
		  qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);

	resp.sqid = qp->raw_sq.id;
	resp.cqid = send_cq->queue.id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_remove_cq_cb;
	}

	return 0;

err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, send_cq);

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

err_destroy_queue:
	mana_ib_destroy_queue(mdev, &qp->raw_sq);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port);

	return err;
}

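/*
 * Sizing helpers for kernel-level UD/GSI queues: a WQE is the inline OOB
 * plus one gdma_sge per SGE, rounded up to the GDMA basic unit; the queue
 * is sized for max_wr such WQEs, rounded to a power of two and page-aligned.
 */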
static u32 mana_ib_wqe_size(u32 sge, u32 oob_size)
{
	u32 wqe_size = sge * sizeof(struct gdma_sge) + sizeof(struct gdma_wqe) + oob_size;

	return ALIGN(wqe_size, GDMA_WQE_BU_SIZE);
}

static u32 mana_ib_queue_size(struct ib_qp_init_attr *attr, u32 queue_type)
{
	u32 queue_size;

	switch (attr->qp_type) {
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (queue_type == MANA_UD_SEND_QUEUE)
			queue_size = attr->cap.max_send_wr *
				mana_ib_wqe_size(attr->cap.max_send_sge, INLINE_OOB_LARGE_SIZE);
		else
			queue_size = attr->cap.max_recv_wr *
				mana_ib_wqe_size(attr->cap.max_recv_sge, INLINE_OOB_SMALL_SIZE);
		break;
	default:
		return 0;
	}

	return MANA_PAGE_ALIGN(roundup_pow_of_two(queue_size));
}

static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32 queue_type)
{
	enum gdma_queue_type type;

	switch (attr->qp_type) {
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (queue_type == MANA_UD_SEND_QUEUE)
			type = GDMA_SQ;
		else
			type = GDMA_RQ;
		break;
	default:
		type = GDMA_INVALID_QUEUE;
	}
	return type;
}

static int mana_table_store_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
			     GFP_KERNEL);
}

static void mana_table_remove_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
}

static int mana_table_store_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
	u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
	int err;

	err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL);
	if (err)
		return err;

	err = xa_insert_irq(&mdev->qp_table_wq, qidr, qp, GFP_KERNEL);
	if (err)
		goto remove_sq;

	return 0;

remove_sq:
	xa_erase_irq(&mdev->qp_table_wq, qids);
	return err;
}

static void mana_table_remove_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
	u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;

	xa_erase_irq(&mdev->qp_table_wq, qids);
	xa_erase_irq(&mdev->qp_table_wq, qidr);
}

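/*
 * mana_table_store_qp/mana_table_remove_qp publish a QP in the qp_table_wq
 * xarray so completions can be demultiplexed by queue id. The table entry
 * holds the initial reference; removal drops it and waits for all other
 * reference holders before the QP may be freed.
 */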
static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	refcount_set(&qp->refcount, 1);
	init_completion(&qp->free);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		return mana_table_store_rc_qp(mdev, qp);
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return mana_table_store_ud_qp(mdev, qp);
	default:
		ibdev_dbg(&mdev->ib_dev, "Unknown QP type for storing in mana table, %d\n",
			  qp->ibqp.qp_type);
	}

	return -EINVAL;
}

static void mana_table_remove_qp(struct mana_ib_dev *mdev,
				 struct mana_ib_qp *qp)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		mana_table_remove_rc_qp(mdev, qp);
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		mana_table_remove_ud_qp(mdev, qp);
		break;
	default:
		ibdev_dbg(&mdev->ib_dev, "Unknown QP type for removing from mana table, %d\n",
			  qp->ibqp.qp_type);
		return;
	}
	mana_put_qp_ref(qp);
	wait_for_completion(&qp->free);
}

static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_create_rc_qp_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct mana_ib_create_rc_qp ucmd = {};
	int i, err, j;
	u64 flags = 0;
	u32 doorbell;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, ibucontext);
	doorbell = mana_ucontext->doorbell;
	flags = MANA_RC_FLAG_NO_FMR;
	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy from udata, %d\n", err);
		return err;
	}

	for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
		/* skip FMR for user-level RC QPs */
		if (i == MANA_RC_SEND_QUEUE_FMR) {
			qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;
			qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
			continue;
		}
		err = mana_ib_create_queue(mdev, ucmd.queue_buf[j], ucmd.queue_size[j],
					   &qp->rc_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n", i, err);
			goto destroy_queues;
		}
		j++;
	}

	err = mana_ib_gd_create_rc_qp(mdev, qp, attr, doorbell, flags);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp %d\n", err);
		goto destroy_queues;
	}
	qp->ibqp.qp_num = qp->rc_qp.queues[MANA_RC_RECV_QUEUE_RESPONDER].id;
	qp->port = attr->port_num;

	if (udata) {
		for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
			if (i == MANA_RC_SEND_QUEUE_FMR)
				continue;
			resp.queue_id[j] = qp->rc_qp.queues[i].id;
			j++;
		}
		err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
			goto destroy_qp;
		}
	}

	err = mana_table_store_qp(mdev, qp);
	if (err)
		goto destroy_qp;

	return 0;

destroy_qp:
	mana_ib_gd_destroy_rc_qp(mdev, qp);
destroy_queues:
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
	return err;
}

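/*
 * Kernel QPs are linked onto the send/recv lists of their CQs (under
 * cq_lock) so that completion processing can walk every QP attached to a
 * given CQ.
 */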
static void mana_add_qp_to_cqs(struct mana_ib_qp *qp)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	unsigned long flags;

	spin_lock_irqsave(&send_cq->cq_lock, flags);
	list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
	spin_unlock_irqrestore(&send_cq->cq_lock, flags);

	spin_lock_irqsave(&recv_cq->cq_lock, flags);
	list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
	spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
}

static void mana_remove_qp_from_cqs(struct mana_ib_qp *qp)
{
	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
	unsigned long flags;

	spin_lock_irqsave(&send_cq->cq_lock, flags);
	list_del(&qp->cq_send_list);
	spin_unlock_irqrestore(&send_cq->cq_lock, flags);

	spin_lock_irqsave(&recv_cq->cq_lock, flags);
	list_del(&qp->cq_recv_list);
	spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
}

static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	u32 doorbell, queue_size;
	int i, err;

	if (udata) {
		ibdev_dbg(&mdev->ib_dev, "User-level UD QPs are not supported\n");
		return -EOPNOTSUPP;
	}

	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i) {
		queue_size = mana_ib_queue_size(attr, i);
		err = mana_ib_create_kernel_queue(mdev, queue_size, mana_ib_queue_type(attr, i),
						  &qp->ud_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n",
				  i, err);
			goto destroy_queues;
		}
	}
	doorbell = mdev->gdma_dev->doorbell;

	err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
				  sizeof(struct ud_rq_shadow_wqe));
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create shadow rq err %d\n", err);
		goto destroy_queues;
	}
	err = create_shadow_queue(&qp->shadow_sq, attr->cap.max_send_wr,
				  sizeof(struct ud_sq_shadow_wqe));
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create shadow sq err %d\n", err);
		goto destroy_shadow_queues;
	}

	err = mana_ib_gd_create_ud_qp(mdev, qp, attr, doorbell, attr->qp_type);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create ud qp %d\n", err);
		goto destroy_shadow_queues;
	}
	qp->ibqp.qp_num = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
	qp->port = attr->port_num;

	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
		qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;

	err = mana_table_store_qp(mdev, qp);
	if (err)
		goto destroy_qp;

	mana_add_qp_to_cqs(qp);

	return 0;

destroy_qp:
	mana_ib_gd_destroy_ud_qp(mdev, qp);
destroy_shadow_queues:
	destroy_shadow_queue(&qp->shadow_rq);
	destroy_shadow_queue(&qp->shadow_sq);
destroy_queues:
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);
	return err;
}

int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	case IB_QPT_RC:
		return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return mana_ib_create_ud_qp(ibqp, ibqp->pd, attr, udata);
	default:
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EINVAL;
}

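/*
 * Translate ib_qp_attr into a MANA_IB_SET_QP_STATE request. MAC and GID
 * bytes are copied in reverse order for the hardware, and the RoCEv2 UDP
 * source port is derived from the flow label and QP numbers.
 */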
static int
mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibqp->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_rnic_set_qp_state_resp resp = {};
	struct mana_rnic_set_qp_state_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;
	req.qp_handle = qp->qp_handle;
	req.qp_state = attr->qp_state;
	req.attr_mask = attr_mask;
	req.path_mtu = attr->path_mtu;
	req.rq_psn = attr->rq_psn;
	req.sq_psn = attr->sq_psn;
	req.dest_qpn = attr->dest_qp_num;
	req.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	req.retry_cnt = attr->retry_cnt;
	req.rnr_retry = attr->rnr_retry;
	req.min_rnr_timer = attr->min_rnr_timer;
	if (attr_mask & IB_QP_AV) {
		ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
		if (!ndev) {
			ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
				  ibqp->port, ibqp->qp_num);
			return -EINVAL;
		}
		mpc = netdev_priv(ndev);
		copy_in_reverse(req.ah_attr.src_mac, mpc->mac_addr, ETH_ALEN);
		copy_in_reverse(req.ah_attr.dest_mac, attr->ah_attr.roce.dmac, ETH_ALEN);
		copy_in_reverse(req.ah_attr.src_addr, attr->ah_attr.grh.sgid_attr->gid.raw,
				sizeof(union ib_gid));
		copy_in_reverse(req.ah_attr.dest_addr, attr->ah_attr.grh.dgid.raw,
				sizeof(union ib_gid));
		if (rdma_gid_attr_network_type(attr->ah_attr.grh.sgid_attr) == RDMA_NETWORK_IPV4) {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV4;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV4;
		} else {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV6;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV6;
		}
		req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
		req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
							  ibqp->qp_num, attr->dest_qp_num);
		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
		req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed modify qp err %d\n", err);
		return err;
	}

	return 0;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	switch (ibqp->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return mana_ib_gd_modify_qp(ibqp, attr, attr_mask, udata);
	default:
		ibdev_dbg(ibqp->device, "Modify QP type %u not supported\n", ibqp->qp_type);
		return -EOPNOTSUPP;
	}
}

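/*
 * The destroy paths below mirror the corresponding create paths: hardware
 * objects are torn down first, then driver-side queues and vPort state are
 * released.
 */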
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

	mana_ib_destroy_queue(mdev, &qp->raw_sq);

	mana_ib_uncfg_vport(mdev, pd, qp->port);

	return 0;
}

static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	int i;

	mana_table_remove_qp(mdev, qp);

	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_rc_qp(mdev, qp);
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);

	return 0;
}

static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	int i;

	mana_remove_qp_from_cqs(qp);
	mana_table_remove_qp(mdev, qp);

	destroy_shadow_queue(&qp->shadow_rq);
	destroy_shadow_queue(&qp->shadow_sq);

	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_ud_qp(mdev, qp);
	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
		mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);

	return 0;
}

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);
	case IB_QPT_RC:
		return mana_ib_destroy_rc_qp(qp, udata);
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return mana_ib_destroy_ud_qp(qp, udata);
	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -ENOENT;
}