/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * Signal the QP destroy path that the flush has completed so that
	 * it can safely proceed to destroy the QP.
	 */
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}
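/*
 * Queue the deferred flush work for a QP. A reference on the QP is taken
 * here and released by flush_work_handle() once the transition to
 * IB_QPS_ERR has been issued, so the QP cannot be freed while the work is
 * still pending.
 */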
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	refcount_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}

void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
	 * gets into errored mode. Hence, as a workaround to this hardware
	 * limitation, the driver needs to assist in flushing. But the flush
	 * operation uses a mailbox to convey the QP state to the hardware,
	 * which can sleep due to the mutex protection around the mailbox
	 * calls. Hence, use the deferred flush for now.
	 */
	if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
		init_flush_work(dev, qp);
}

void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		refcount_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "async event for bogus QP %08x\n", qpn);
		return;
	}

	if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
	    event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
	    event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
		qp->state = IB_QPS_ERR;

		flush_cqe(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct ib_event event;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static u8 get_affinity_cq_bank(u8 qp_bank)
{
	return (qp_bank >> 1) & CQ_BANKID_MASK;
}

static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
				       struct hns_roce_bank *bank)
{
#define INVALID_LOAD_QPNUM 0xFFFFFFFF
	struct ib_cq *scq = init_attr->send_cq;
	u32 least_load = INVALID_LOAD_QPNUM;
	unsigned long cqn = 0;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	if (scq)
		cqn = to_hr_cq(scq)->cqn;

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
			continue;

		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}
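/*
 * QPNs are spread across HNS_ROCE_QP_BANK_NUM banks: the lower 3 bits of a
 * QPN select the bank and the remaining bits come from the per-bank IDA.
 * get_least_load_bankid_for_qp() only considers banks whose affinity CQ
 * bank matches the bank of the send CQ, then picks the least-loaded
 * candidate.
 */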
static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
				 unsigned long *qpn)
{
	int id;

	id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
	if (id < 0) {
		id = ida_alloc_range(&bank->ida, bank->min, bank->max,
				     GFP_KERNEL);
		if (id < 0)
			return id;
	}

	/* the QPN should keep increasing until the max value is reached. */
	bank->next = (id + 1) > bank->max ? bank->min : id + 1;

	/* the lower 3 bits are the bank ID */
	*qpn = (id << 3) | bankid;

	return 0;
}

static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		     struct ib_qp_init_attr *init_attr)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long num = 0;
	u8 bankid;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		num = 1;
	} else {
		mutex_lock(&qp_table->bank_mutex);
		bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);

		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
					    &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to alloc QPN, ret = %d\n", ret);
			mutex_unlock(&qp_table->bank_mutex);
			return ret;
		}

		qp_table->bank[bankid].inuse++;
		mutex_unlock(&qp_table->bank_mutex);
	}

	hr_qp->qpn = num;

	return 0;
}

static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for softwc */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}
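/*
 * Reserve the HEM-backed context entries this QP needs: the QP context
 * (QPC) itself, the IRRL entry, the TRRL entry when the hardware uses one
 * (trrl_entry_sz != 0), and the SCC context when QP-level flow control is
 * supported. Each successful hns_roce_table_get() is undone in reverse
 * order on failure.
 */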
static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
{
	rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);

	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
		list_del(&hr_qp->sq_node);

	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
	    hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
		list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn);
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static inline u8 get_qp_bankid(unsigned long qpn)
{
	/* The lower 3 bits of QPN are used to hash to different banks */
	return (u8)(qpn & GENMASK(2, 0));
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	u8 bankid;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	bankid = get_qp_bankid(hr_qp->qpn);

	ida_free(&hr_dev->qp_table.bank[bankid].ida,
		 hr_qp->qpn / HNS_ROCE_QP_BANK_NUM);

	mutex_lock(&hr_dev->qp_table.bank_mutex);
	hr_dev->qp_table.bank[bankid].inuse--;
	mutex_unlock(&hr_dev->qp_table.bank_mutex);
}
434 */ 435 if (user) 436 max_sge = roundup_pow_of_two(max_sge + 1); 437 else 438 hr_qp->rq.rsv_sge = 1; 439 440 return max_sge; 441 } 442 443 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, 444 struct hns_roce_qp *hr_qp, int has_rq, bool user) 445 { 446 u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user); 447 u32 cnt; 448 449 /* If srq exist, set zero for relative number of rq */ 450 if (!has_rq) { 451 hr_qp->rq.wqe_cnt = 0; 452 hr_qp->rq.max_gs = 0; 453 cap->max_recv_wr = 0; 454 cap->max_recv_sge = 0; 455 456 return 0; 457 } 458 459 /* Check the validity of QP support capacity */ 460 if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || 461 cap->max_recv_sge > max_sge) { 462 ibdev_err(&hr_dev->ib_dev, 463 "RQ config error, depth = %u, sge = %u\n", 464 cap->max_recv_wr, cap->max_recv_sge); 465 return -EINVAL; 466 } 467 468 cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); 469 if (cnt > hr_dev->caps.max_wqes) { 470 ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", 471 cap->max_recv_wr); 472 return -EINVAL; 473 } 474 475 hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + 476 hr_qp->rq.rsv_sge); 477 478 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * 479 hr_qp->rq.max_gs); 480 481 hr_qp->rq.wqe_cnt = cnt; 482 483 cap->max_recv_wr = cnt; 484 cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; 485 486 return 0; 487 } 488 489 static u32 get_max_inline_data(struct hns_roce_dev *hr_dev, 490 struct ib_qp_cap *cap) 491 { 492 if (cap->max_inline_data) { 493 cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data); 494 return min(cap->max_inline_data, 495 hr_dev->caps.max_sq_inline); 496 } 497 498 return 0; 499 } 500 501 static void update_inline_data(struct hns_roce_qp *hr_qp, 502 struct ib_qp_cap *cap) 503 { 504 u32 sge_num = hr_qp->sq.ext_sge_cnt; 505 506 if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { 507 if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI || 508 hr_qp->ibqp.qp_type == IB_QPT_UD)) 509 sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num); 510 511 cap->max_inline_data = max(cap->max_inline_data, 512 sge_num * HNS_ROCE_SGE_SIZE); 513 } 514 515 hr_qp->max_inline_data = cap->max_inline_data; 516 } 517 518 static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi, 519 u32 max_send_sge) 520 { 521 unsigned int std_sge_num; 522 unsigned int min_sge; 523 524 std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE; 525 min_sge = is_ud_or_gsi ? 1 : 0; 526 return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) : 527 min_sge; 528 } 529 530 static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi, 531 u32 max_inline_data) 532 { 533 unsigned int inline_sge; 534 535 inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE; 536 537 /* 538 * if max_inline_data less than 539 * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE, 540 * In addition to ud's mode, no need to extend sge. 541 */ 542 if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE) 543 inline_sge = 0; 544 545 return inline_sge; 546 } 547 548 static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt, 549 struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap) 550 { 551 bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI || 552 hr_qp->ibqp.qp_type == IB_QPT_UD); 553 unsigned int std_sge_num; 554 u32 inline_ext_sge = 0; 555 u32 ext_wqe_sge_cnt; 556 u32 total_sge_cnt; 557 558 cap->max_inline_data = get_max_inline_data(hr_dev, cap); 559 560 hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; 561 std_sge_num = is_ud_or_gsi ? 
static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
			      struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
{
	bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
			     hr_qp->ibqp.qp_type == IB_QPT_UD);
	unsigned int std_sge_num;
	u32 inline_ext_sge = 0;
	u32 ext_wqe_sge_cnt;
	u32 total_sge_cnt;

	cap->max_inline_data = get_max_inline_data(hr_dev, cap);

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
	ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi,
							cap->max_send_sge);

	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
		inline_ext_sge = max(ext_wqe_sge_cnt,
				     get_sge_num_from_max_inl_data(is_ud_or_gsi,
							cap->max_inline_data));
		hr_qp->sq.ext_sge_cnt = inline_ext_sge ?
					roundup_pow_of_two(inline_ext_sge) : 0;

		hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num));
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);

		ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt;
	} else {
		hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
		hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs;
	}

	/* If the number of extended SGEs is not zero, they MUST occupy at
	 * least HNS_HW_PAGE_SIZE of buffer space.
	 */
	if (ext_wqe_sge_cnt) {
		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt);
		hr_qp->sge.sge_cnt = max(total_sge_cnt,
					 (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
	}

	update_inline_data(hr_qp, cap);
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}
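/*
 * Describe the QP buffer layout for the MTR: the SQ WQE region first, then
 * the extended SGE region, then the RQ WQE region. Each region is sized in
 * hardware pages and records its own hop number, and hr_qp->buff_size
 * accumulates the total so the offsets can be used by the WQE accessors
 * further below.
 */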
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->region_count = idx;

	return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;

	return 0;

err_inline:

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
}
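/*
 * Record doorbells: userspace SQ/RQ doorbells are mapped only when the
 * device advertises HNS_ROCE_CAP_FLAG_QP_RECORD_DB, the udata buffers are
 * large enough to carry the doorbell addresses and the returned cap_flags,
 * and the QP actually has the corresponding work queue. Kernel QPs only
 * need an RQ record doorbell.
 */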
static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}

static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
			 struct hns_roce_dev *hr_dev,
			 struct ib_udata *udata,
			 struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx =
		rdma_udata_to_drv_context(udata,
			struct hns_roce_ucontext, ibucontext);
	struct rdma_user_mmap_entry *rdma_entry;
	u64 address;

	address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;

	hr_qp->dwqe_mmap_entry =
		hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
						HNS_ROCE_DWQE_SIZE,
						HNS_ROCE_MMAP_TYPE_DWQE);

	if (!hr_qp->dwqe_mmap_entry) {
		ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
		return -ENOMEM;
	}

	rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
	resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);

	return 0;
}

static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata,
			    struct hns_roce_ib_create_qp *ucmd,
			    struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
		struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
		ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user SQ doorbell, ret = %d.\n",
				  ret);
			goto err_out;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
	}

	if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
		ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user RQ doorbell, ret = %d.\n",
				  ret);
			goto err_sdb;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;

err_sdb:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_init_attr *init_attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		hr_qp->sq.db_reg = hr_dev->mem_base +
				   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
	else
		hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
				   DB_REG_OFFSET * hr_dev->priv_uar.index;

	hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
			   DB_REG_OFFSET * hr_dev->priv_uar.index;

	if (kernel_qp_has_rdb(hr_dev, init_attr)) {
		ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc kernel RQ doorbell, ret = %d.\n",
				  ret);
			return ret;
		}
		*hr_qp->rdb.db_record = 0;
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;
}
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	int ret;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
			ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
			if (ret)
				return ret;
		}

		ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
				       resp);
		if (ret)
			goto err_remove_qp;
	} else {
		ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
		if (ret)
			return ret;
	}

	return 0;

err_remove_qp:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
		qp_user_mmap_entry_remove(hr_qp);

	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
			qp_user_mmap_entry_remove(hr_qp);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;
err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

static void default_congest_type(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp)
{
	if (hr_qp->ibqp.qp_type == IB_QPT_UD ||
	    hr_qp->ibqp.qp_type == IB_QPT_GSI)
		hr_qp->cong_type = CONG_TYPE_DCQCN;
	else
		hr_qp->cong_type = hr_dev->caps.default_cong_type;
}

static int set_congest_type(struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);

	switch (ucmd->cong_type_flags) {
	case HNS_ROCE_CREATE_QP_FLAGS_DCQCN:
		hr_qp->cong_type = CONG_TYPE_DCQCN;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_LDCP:
		hr_qp->cong_type = CONG_TYPE_LDCP;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_HC3:
		hr_qp->cong_type = CONG_TYPE_HC3;
		break;
	case HNS_ROCE_CREATE_QP_FLAGS_DIP:
		hr_qp->cong_type = CONG_TYPE_DIP;
		break;
	default:
		return -EINVAL;
	}

	if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap))
		return -EOPNOTSUPP;

	if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
	    hr_qp->cong_type != CONG_TYPE_DCQCN)
		return -EOPNOTSUPP;

	return 0;
}
static int set_congest_param(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct hns_roce_ib_create_qp *ucmd)
{
	if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
		return set_congest_type(hr_qp, ucmd);

	default_congest_type(hr_dev, hr_qp);

	return 0;
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ucontext *uctx;
	int ret;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr), !!udata);
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
						 ibucontext);
		hr_qp->config = uctx->config;
		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);

		ret = set_congest_param(hr_dev, hr_qp, ucmd);
		if (ret)
			return ret;
	} else {
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);

		default_congest_type(hr_dev, hr_qp);
	}

	return ret;
}
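/*
 * Core QP creation path shared by user and kernel QPs: set the parameters,
 * allocate the kernel wrid arrays (kernel QPs only), the WQE buffer, a QPN,
 * the doorbells and the context entries, publish the QP in the xarray and
 * the per-device/CQ lists, then report the enabled capabilities back to
 * userspace. The error labels unwind these steps in reverse order.
 */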
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd = {};
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;

	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
		goto err_out;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
				  ret);
			goto err_out;
		}
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
		goto err_buf;
	}

	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
		goto err_qpn;
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
			  ret);
		goto err_db;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
			  ret);
		goto err_qpc;
	}

	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
		goto err_store;
	}

	if (udata) {
		resp.cap_flags = hr_qp->en_flags;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_store;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_flow_ctrl;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	refcount_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_flow_ctrl:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_store:
	free_qpc(hr_dev, hr_qp);
err_qpc:
	free_qp_db(hr_dev, hr_qp, udata);
err_db:
	free_qpn(hr_dev, hr_qp);
err_qpn:
	free_qp_buf(hr_dev, hr_qp);
err_buf:
	free_kernel_wrid(hr_qp);
err_out:
	mutex_destroy(&hr_qp->mutex);
	return ret;
}

void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);
	mutex_destroy(&hr_qp->mutex);
}

static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
			 bool is_user)
{
	switch (type) {
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:
		if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
			goto out;
		break;
	case IB_QPT_UD:
		if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
		    is_user)
			goto out;
		break;
	case IB_QPT_RC:
	case IB_QPT_GSI:
		break;
	default:
		goto out;
	}

	return 0;

out:
	ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type);

	return -EOPNOTSUPP;
}

int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct ib_device *ibdev = qp->device;
	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
	struct hns_roce_qp *hr_qp = to_hr_qp(qp);
	int ret;

	ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
	if (ret)
		goto err_out;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;

	if (init_attr->qp_type == IB_QPT_GSI) {
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
	if (ret)
		ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
			  init_attr->qp_type, ret);

err_out:
	if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]);

	return ret;
}

int to_hr_qp_type(int qp_type)
{
	switch (qp_type) {
	case IB_QPT_RC:
		return SERV_TYPE_RC;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		return SERV_TYPE_UD;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:
		return SERV_TYPE_XRC;
	default:
		return -1;
	}
}
static int check_mtu_validate(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_attr *attr, int attr_mask)
{
	enum ib_mtu active_mtu;
	int p;

	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	    attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr path_mtu(%d) invalid while modify qp",
			  attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int p;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
			  attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid attr, pkey_index = %u.\n",
				  attr->pkey_index);
			return -EINVAL;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_rd_atomic = %u.\n",
			  attr->max_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "invalid attr, max_dest_rd_atomic = %u.\n",
			  attr->max_dest_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PATH_MTU)
		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

	return 0;
}
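/*
 * When a userspace QP is moved to the error state, the current SQ/RQ head
 * pointers are read back from the record doorbells so the driver can
 * generate flush CQEs on behalf of the application; without an SQ record
 * doorbell this cannot be done and the transition is rejected.
 */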
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ib_modify_qp_resp resp = {};
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
		goto out;

	cur_state = hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET)
		goto out;

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state, udata);
	if (ret)
		goto out;

	if (udata && udata->outlen) {
		resp.tc_mode = hr_qp->tc_mode;
		resp.priority = hr_qp->sl;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			ibdev_err_ratelimited(&hr_dev->ib_dev,
					      "failed to copy modify qp resp.\n");
	}

out:
	mutex_unlock(&hr_qp->mutex);
	if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]);

	return ret;
}

/* Lock the send and receive CQs in ascending CQN order to avoid deadlock */
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
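/*
 * Overflow check for posting 'nreq' more WQEs: first an unlocked fast-path
 * test, then a re-check under the CQ lock so the tail index observed is
 * consistent with completions being reaped concurrently.
 */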
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
						sizeof(u32), GFP_KERNEL);
	if (!qp_table->idx_table.spare_idx)
		return -ENOMEM;

	mutex_init(&qp_table->scc_mutex);
	mutex_init(&qp_table->bank_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	for (i = 0; i < reserved_from_bot; i++) {
		hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
		hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
		ida_init(&hr_dev->qp_table.bank[i].ida);
		hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
					       HNS_ROCE_QP_BANK_NUM - 1;
		hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
		ida_destroy(&hr_dev->qp_table.bank[i].ida);
	mutex_destroy(&hr_dev->qp_table.bank_mutex);
	mutex_destroy(&hr_dev->qp_table.scc_mutex);
	kfree(hr_dev->qp_table.idx_table.spare_idx);
}