Lines matching the query “+full:cap +full:- +full:get” in drivers/infiniband/hw/hns/hns_roce_qp.c (Linux kernel hns RoCE driver). Each hit keeps its source line number and the enclosing function.

45 struct device *dev = hr_dev->dev; in hns_roce_qp_lookup()
49 xa_lock_irqsave(&hr_dev->qp_table_xa, flags); in hns_roce_qp_lookup()
52 refcount_inc(&qp->refcount); in hns_roce_qp_lookup()
53 xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags); in hns_roce_qp_lookup()
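
A minimal standalone sketch of the lookup-and-pin pattern used by hns_roce_qp_lookup() above: take the xarray lock, look the entry up, and grab a reference before the lock drops so the object cannot vanish under the caller. The my_dev/my_obj types are placeholders, not the driver's structures.

#include <linux/refcount.h>
#include <linux/xarray.h>

struct my_obj {
        refcount_t refcount;
};

struct my_dev {
        struct xarray obj_xa;
};

static struct my_obj *my_obj_lookup(struct my_dev *dev, unsigned long id)
{
        struct my_obj *obj;
        unsigned long flags;

        xa_lock_irqsave(&dev->obj_xa, flags);
        obj = xa_load(&dev->obj_xa, id);
        if (obj)
                refcount_inc(&obj->refcount);   /* pin while still under the lock */
        xa_unlock_irqrestore(&dev->obj_xa, flags);

        return obj;     /* caller drops the reference when done */
}
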
67 struct device *dev = flush_work->hr_dev->dev; in flush_work_handle()
75 if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) { in flush_work_handle()
76 ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL); in flush_work_handle()
86 if (refcount_dec_and_test(&hr_qp->refcount)) in flush_work_handle()
87 complete(&hr_qp->free); in flush_work_handle()
92 struct hns_roce_work *flush_work = &hr_qp->flush_work; in init_flush_work()
95 spin_lock_irqsave(&hr_qp->flush_lock, flags); in init_flush_work()
97 if (test_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag)) { in init_flush_work()
98 spin_unlock_irqrestore(&hr_qp->flush_lock, flags); in init_flush_work()
102 refcount_inc(&hr_qp->refcount); in init_flush_work()
103 queue_work(hr_dev->irq_workq, &flush_work->work); in init_flush_work()
104 spin_unlock_irqrestore(&hr_qp->flush_lock, flags); in init_flush_work()
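
flush_work_handle() and init_flush_work() above form a queue/complete handshake: the queueing side bumps the refcount under flush_lock before queue_work() so the QP stays alive while the work is pending, and the handler drops that reference, completing the free completion if it was the last holder. A reduced sketch of the same handshake, with placeholder names and the state-change step elided:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_qp {
        refcount_t refcount;
        struct completion free;
        spinlock_t flush_lock;
        struct work_struct flush_work;  /* INIT_WORK()ed when the QP is created */
};

static void my_flush_handler(struct work_struct *work)
{
        struct my_qp *qp = container_of(work, struct my_qp, flush_work);

        /* ... move the QP to the error state here ... */

        if (refcount_dec_and_test(&qp->refcount))       /* drop the queueing ref */
                complete(&qp->free);
}

static void my_queue_flush(struct workqueue_struct *wq, struct my_qp *qp)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->flush_lock, flags);
        refcount_inc(&qp->refcount);    /* keep the QP alive for the pending work */
        queue_work(wq, &qp->flush_work);
        spin_unlock_irqrestore(&qp->flush_lock, flags);
}
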
118 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag)) in flush_cqe()
130 qp->event(qp, (enum hns_roce_event)event_type); in hns_roce_qp_event()
132 if (refcount_dec_and_test(&qp->refcount)) in hns_roce_qp_event()
133 complete(&qp->free); in hns_roce_qp_event()
144 qp->state = IB_QPS_ERR; in hns_roce_flush_cqe()
147 if (refcount_dec_and_test(&qp->refcount)) in hns_roce_flush_cqe()
148 complete(&qp->free); in hns_roce_flush_cqe()
154 struct ib_qp *ibqp = &hr_qp->ibqp; in hns_roce_ib_qp_event()
157 if (ibqp->event_handler) { in hns_roce_ib_qp_event()
158 event.device = ibqp->device; in hns_roce_ib_qp_event()
188 dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", in hns_roce_ib_qp_event()
189 type, hr_qp->qpn); in hns_roce_ib_qp_event()
192 ibqp->event_handler(&event, ibqp->qp_context); in hns_roce_ib_qp_event()
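
The dispatch above amounts to filling a struct ib_event and calling the consumer's registered handler; a compressed sketch using the ib_verbs types, minus the driver's hardware-to-IB event translation:

#include <rdma/ib_verbs.h>

static void my_report_qp_event(struct ib_qp *ibqp, enum ib_event_type type)
{
        struct ib_event event;

        if (!ibqp->event_handler)
                return;

        event.device = ibqp->device;
        event.element.qp = ibqp;
        event.event = type;             /* e.g. IB_EVENT_QP_FATAL */
        ibqp->event_handler(&event, ibqp->qp_context);
}
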
205 struct ib_cq *scq = init_attr->send_cq; in get_least_load_bankid_for_qp()
213 cqn = to_hr_cq(scq)->cqn; in get_least_load_bankid_for_qp()
234 id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL); in alloc_qpn_with_bankid()
236 id = ida_alloc_range(&bank->ida, bank->min, bank->max, in alloc_qpn_with_bankid()
243 bank->next = (id + 1) > bank->max ? bank->min : id + 1; in alloc_qpn_with_bankid()
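
The two ida_alloc_range() calls above implement a round-robin cursor: allocate upward from bank->next, wrap to bank->min if that range is exhausted, then advance the cursor past the id just handed out. A generic sketch of that allocator (the bank fields mirror the driver's naming; everything else is illustrative):

#include <linux/idr.h>

struct my_bank {
        struct ida ida;
        u32 min;        /* lowest id this bank may hand out */
        u32 max;        /* highest id this bank may hand out */
        u32 next;       /* round-robin cursor */
};

static int my_alloc_id(struct my_bank *bank)
{
        int id;

        id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
        if (id < 0) {
                /* wrap around and retry from the bottom of the range */
                id = ida_alloc_range(&bank->ida, bank->min, bank->max,
                                     GFP_KERNEL);
                if (id < 0)
                        return id;
        }

        /* advance the cursor; wrap when the top of the range was used */
        bank->next = (id + 1) > bank->max ? bank->min : id + 1;

        return id;
}

Starting each allocation at the cursor rather than at min spreads ids across the range instead of immediately recycling freed numbers.
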
253 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpn()
258 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { in alloc_qpn()
261 mutex_lock(&qp_table->bank_mutex); in alloc_qpn()
262 bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank); in alloc_qpn()
264 ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid, in alloc_qpn()
267 ibdev_err(&hr_dev->ib_dev, in alloc_qpn()
269 mutex_unlock(&qp_table->bank_mutex); in alloc_qpn()
273 qp_table->bank[bankid].inuse++; in alloc_qpn()
274 mutex_unlock(&qp_table->bank_mutex); in alloc_qpn()
277 hr_qp->qpn = num; in alloc_qpn()
292 spin_lock_irqsave(&hr_dev->qp_list_lock, flags); in add_qp_to_list()
295 list_add_tail(&hr_qp->node, &hr_dev->qp_list); in add_qp_to_list()
297 list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); in add_qp_to_list()
299 list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); in add_qp_to_list()
302 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); in add_qp_to_list()
309 struct xarray *xa = &hr_dev->qp_table_xa; in hns_roce_qp_store()
312 if (!hr_qp->qpn) in hns_roce_qp_store()
313 return -EINVAL; in hns_roce_qp_store()
315 ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); in hns_roce_qp_store()
317 dev_err(hr_dev->dev, "failed to xa store for QPC\n"); in hns_roce_qp_store()
320 add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, in hns_roce_qp_store()
321 init_attr->recv_cq); in hns_roce_qp_store()
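
Storing the QP into the table is the mirror image of the lookup sketch above: xa_store_irq() installs the entry and xa_err() unwraps a possible allocation failure. Reusing the placeholder my_dev/my_obj types from that sketch:

static int my_obj_store(struct my_dev *dev, struct my_obj *obj,
                        unsigned long id)
{
        int ret;

        /* xa_store_irq() returns the old entry or an xa_err()-encoded error */
        ret = xa_err(xa_store_irq(&dev->obj_xa, id, obj, GFP_KERNEL));
        if (ret)
                pr_err("failed to store object %lu: %d\n", id, ret);

        return ret;
}
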
328 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpc()
329 struct device *dev = hr_dev->dev; in alloc_qpc()
332 if (!hr_qp->qpn) in alloc_qpc()
333 return -EINVAL; in alloc_qpc()
336 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
338 dev_err(dev, "failed to get QPC table\n"); in alloc_qpc()
343 ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in alloc_qpc()
345 dev_err(dev, "failed to get IRRL table\n"); in alloc_qpc()
349 if (hr_dev->caps.trrl_entry_sz) { in alloc_qpc()
351 ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, in alloc_qpc()
352 hr_qp->qpn); in alloc_qpc()
354 dev_err(dev, "failed to get TRRL table\n"); in alloc_qpc()
359 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { in alloc_qpc()
361 ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, in alloc_qpc()
362 hr_qp->qpn); in alloc_qpc()
364 dev_err(dev, "failed to get SCC CTX table\n"); in alloc_qpc()
372 if (hr_dev->caps.trrl_entry_sz) in alloc_qpc()
373 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); in alloc_qpc()
376 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in alloc_qpc()
379 hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
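
alloc_qpc() follows the usual acquire-or-unwind ladder: each context table claimed must be released again if a later hns_roce_table_get() fails, so every failure jumps to a label that puts what was acquired so far, in reverse order. The bare shape, with hypothetical get_*()/put_*() helpers standing in for the per-table calls:

struct my_dev;

/* Hypothetical per-resource helpers; each put_*() undoes the matching get_*(). */
int get_a(struct my_dev *dev, unsigned long id);
void put_a(struct my_dev *dev, unsigned long id);
int get_b(struct my_dev *dev, unsigned long id);
void put_b(struct my_dev *dev, unsigned long id);
int get_c(struct my_dev *dev, unsigned long id);

static int my_alloc_ctx(struct my_dev *dev, unsigned long id)
{
        int ret;

        ret = get_a(dev, id);
        if (ret)
                return ret;

        ret = get_b(dev, id);
        if (ret)
                goto err_put_a;

        ret = get_c(dev, id);
        if (ret)
                goto err_put_b;

        return 0;

err_put_b:
        put_b(dev, id);
err_put_a:
        put_a(dev, id);
        return ret;
}
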
387 rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry); in qp_user_mmap_entry_remove()
392 struct xarray *xa = &hr_dev->qp_table_xa; in hns_roce_qp_remove()
395 list_del(&hr_qp->node); in hns_roce_qp_remove()
397 if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT) in hns_roce_qp_remove()
398 list_del(&hr_qp->sq_node); in hns_roce_qp_remove()
400 if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI && in hns_roce_qp_remove()
401 hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT) in hns_roce_qp_remove()
402 list_del(&hr_qp->rq_node); in hns_roce_qp_remove()
405 __xa_erase(xa, hr_qp->qpn); in hns_roce_qp_remove()
411 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in free_qpc()
413 if (hr_dev->caps.trrl_entry_sz) in free_qpc()
414 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); in free_qpc()
415 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in free_qpc()
428 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) in free_qpn()
431 if (hr_qp->qpn < hr_dev->caps.reserved_qps) in free_qpn()
434 bankid = get_qp_bankid(hr_qp->qpn); in free_qpn()
436 ida_free(&hr_dev->qp_table.bank[bankid].ida, in free_qpn()
437 hr_qp->qpn / HNS_ROCE_QP_BANK_NUM); in free_qpn()
439 mutex_lock(&hr_dev->qp_table.bank_mutex); in free_qpn()
440 hr_dev->qp_table.bank[bankid].inuse--; in free_qpn()
441 mutex_unlock(&hr_dev->qp_table.bank_mutex); in free_qpn()
447 u32 max_sge = dev->caps.max_rq_sg; in proc_rq_sge()
449 if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in proc_rq_sge()
461 hr_qp->rq.rsv_sge = 1; in proc_rq_sge()
466 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, in set_rq_size() argument
474 hr_qp->rq.wqe_cnt = 0; in set_rq_size()
475 hr_qp->rq.max_gs = 0; in set_rq_size()
476 cap->max_recv_wr = 0; in set_rq_size()
477 cap->max_recv_sge = 0; in set_rq_size()
483 if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes || in set_rq_size()
484 cap->max_recv_sge > max_sge) { in set_rq_size()
485 ibdev_err(&hr_dev->ib_dev, in set_rq_size()
487 cap->max_recv_wr, cap->max_recv_sge); in set_rq_size()
488 return -EINVAL; in set_rq_size()
491 cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes)); in set_rq_size()
492 if (cnt > hr_dev->caps.max_wqes) { in set_rq_size()
493 ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n", in set_rq_size()
494 cap->max_recv_wr); in set_rq_size()
495 return -EINVAL; in set_rq_size()
498 hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + in set_rq_size()
499 hr_qp->rq.rsv_sge); in set_rq_size()
501 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz * in set_rq_size()
502 hr_qp->rq.max_gs); in set_rq_size()
504 hr_qp->rq.wqe_cnt = cnt; in set_rq_size()
506 cap->max_recv_wr = cnt; in set_rq_size()
507 cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; in set_rq_size()
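
The RQ sizing above rounds the requested depth and SGE count up to powers of two, derives the WQE shift with ilog2(), and writes the provisioned values back through the cap structure so the caller sees what it actually got. A condensed sketch of that arithmetic (field names are illustrative, limits come from wherever the device advertises them, and the driver's reserved-SGE handling is omitted):

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/types.h>

struct my_rq_caps {
        u32 min_wqes;
        u32 max_wqes;
        u32 max_sge;
        u32 max_desc_sz;        /* bytes per receive SGE slot */
};

static int my_size_rq(const struct my_rq_caps *caps,
                      u32 *depth, u32 *sge, u32 *wqe_shift)
{
        u32 cnt, max_gs;

        if (!*depth || *depth > caps->max_wqes || *sge > caps->max_sge)
                return -EINVAL;

        cnt = roundup_pow_of_two(max(*depth, caps->min_wqes));
        if (cnt > caps->max_wqes)
                return -EINVAL;

        max_gs = roundup_pow_of_two(max(1U, *sge));
        *wqe_shift = ilog2(caps->max_desc_sz * max_gs);

        /* report the actually provisioned sizes back to the caller */
        *depth = cnt;
        *sge = max_gs;

        return 0;
}
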
513 struct ib_qp_cap *cap) in get_max_inline_data() argument
515 if (cap->max_inline_data) { in get_max_inline_data()
516 cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data); in get_max_inline_data()
517 return min(cap->max_inline_data, in get_max_inline_data()
518 hr_dev->caps.max_sq_inline); in get_max_inline_data()
525 struct ib_qp_cap *cap) in update_inline_data() argument
527 u32 sge_num = hr_qp->sq.ext_sge_cnt; in update_inline_data()
529 if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { in update_inline_data()
530 if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI || in update_inline_data()
531 hr_qp->ibqp.qp_type == IB_QPT_UD)) in update_inline_data()
534 cap->max_inline_data = max(cap->max_inline_data, in update_inline_data()
538 hr_qp->max_inline_data = cap->max_inline_data; in update_inline_data()
549 return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) : in get_sge_num_from_max_send_sge()
574 struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap) in set_ext_sge_param() argument
576 bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI || in set_ext_sge_param()
577 hr_qp->ibqp.qp_type == IB_QPT_UD); in set_ext_sge_param()
583 cap->max_inline_data = get_max_inline_data(hr_dev, cap); in set_ext_sge_param()
585 hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT; in set_ext_sge_param()
588 cap->max_send_sge); in set_ext_sge_param()
590 if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) { in set_ext_sge_param()
593 cap->max_inline_data)); in set_ext_sge_param()
594 hr_qp->sq.ext_sge_cnt = inline_ext_sge ? in set_ext_sge_param()
597 hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num)); in set_ext_sge_param()
598 hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); in set_ext_sge_param()
600 ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt; in set_ext_sge_param()
602 hr_qp->sq.max_gs = max(1U, cap->max_send_sge); in set_ext_sge_param()
603 hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg); in set_ext_sge_param()
604 hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs; in set_ext_sge_param()
612 hr_qp->sge.sge_cnt = max(total_sge_cnt, in set_ext_sge_param()
616 update_inline_data(hr_qp, cap); in set_ext_sge_param()
620 struct ib_qp_cap *cap, in check_sq_size_with_integrity() argument
623 u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); in check_sq_size_with_integrity()
627 if (ucmd->log_sq_stride > max_sq_stride || in check_sq_size_with_integrity()
628 ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { in check_sq_size_with_integrity()
629 ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n"); in check_sq_size_with_integrity()
630 return -EINVAL; in check_sq_size_with_integrity()
633 if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { in check_sq_size_with_integrity()
634 ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n", in check_sq_size_with_integrity()
635 cap->max_send_sge); in check_sq_size_with_integrity()
636 return -EINVAL; in check_sq_size_with_integrity()
643 struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, in set_user_sq_size() argument
646 struct ib_device *ibdev = &hr_dev->ib_dev; in set_user_sq_size()
650 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) || in set_user_sq_size()
651 cnt > hr_dev->caps.max_wqes) in set_user_sq_size()
652 return -EINVAL; in set_user_sq_size()
654 ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); in set_user_sq_size()
661 set_ext_sge_param(hr_dev, cnt, hr_qp, cap); in set_user_sq_size()
663 hr_qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
664 hr_qp->sq.wqe_cnt = cnt; in set_user_sq_size()
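
set_user_sq_size() derives the SQ depth from a userspace-supplied log2 value, and check_shl_overflow() rejects shift counts that would overflow before the result is compared against the device limit. A tiny sketch of that validation (max_wqes here is an illustrative parameter, not a driver field):

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int my_sq_depth_from_log(u8 log_sq_bb_count, u32 max_wqes, u32 *cnt)
{
        /* check_shl_overflow() is true when (1 << log_sq_bb_count) overflows *cnt */
        if (check_shl_overflow(1, log_sq_bb_count, cnt) || *cnt > max_wqes)
                return -EINVAL;

        return 0;
}
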
676 hr_qp->buff_size = 0; in set_wqe_buf_attr()
679 hr_qp->sq.offset = 0; in set_wqe_buf_attr()
680 buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt, in set_wqe_buf_attr()
681 hr_qp->sq.wqe_shift); in set_wqe_buf_attr()
682 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { in set_wqe_buf_attr()
683 buf_attr->region[idx].size = buf_size; in set_wqe_buf_attr()
684 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num; in set_wqe_buf_attr()
686 hr_qp->buff_size += buf_size; in set_wqe_buf_attr()
690 hr_qp->sge.offset = hr_qp->buff_size; in set_wqe_buf_attr()
691 buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt, in set_wqe_buf_attr()
692 hr_qp->sge.sge_shift); in set_wqe_buf_attr()
693 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { in set_wqe_buf_attr()
694 buf_attr->region[idx].size = buf_size; in set_wqe_buf_attr()
695 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num; in set_wqe_buf_attr()
697 hr_qp->buff_size += buf_size; in set_wqe_buf_attr()
701 hr_qp->rq.offset = hr_qp->buff_size; in set_wqe_buf_attr()
702 buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt, in set_wqe_buf_attr()
703 hr_qp->rq.wqe_shift); in set_wqe_buf_attr()
704 if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) { in set_wqe_buf_attr()
705 buf_attr->region[idx].size = buf_size; in set_wqe_buf_attr()
706 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num; in set_wqe_buf_attr()
708 hr_qp->buff_size += buf_size; in set_wqe_buf_attr()
711 if (hr_qp->buff_size < 1) in set_wqe_buf_attr()
712 return -EINVAL; in set_wqe_buf_attr()
714 buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; in set_wqe_buf_attr()
715 buf_attr->region_count = idx; in set_wqe_buf_attr()
721 struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) in set_kernel_sq_size() argument
723 struct ib_device *ibdev = &hr_dev->ib_dev; in set_kernel_sq_size()
726 if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes || in set_kernel_sq_size()
727 cap->max_send_sge > hr_dev->caps.max_sq_sg) { in set_kernel_sq_size()
729 return -EINVAL; in set_kernel_sq_size()
732 cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes)); in set_kernel_sq_size()
733 if (cnt > hr_dev->caps.max_wqes) { in set_kernel_sq_size()
736 return -EINVAL; in set_kernel_sq_size()
739 hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz); in set_kernel_sq_size()
740 hr_qp->sq.wqe_cnt = cnt; in set_kernel_sq_size()
742 set_ext_sge_param(hr_dev, cnt, hr_qp, cap); in set_kernel_sq_size()
745 cap->max_send_wr = cnt; in set_kernel_sq_size()
752 if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr) in hns_roce_qp_has_sq()
760 if (attr->qp_type == IB_QPT_XRC_INI || in hns_roce_qp_has_rq()
761 attr->qp_type == IB_QPT_XRC_TGT || attr->srq || in hns_roce_qp_has_rq()
762 !attr->cap.max_recv_wr) in hns_roce_qp_has_rq()
772 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_qp_buf()
781 ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr, in alloc_qp_buf()
782 PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz, in alloc_qp_buf()
789 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE) in alloc_qp_buf()
790 hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE; in alloc_qp_buf()
801 hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr); in free_qp_buf()
810 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in user_qp_has_sdb()
811 udata->outlen >= offsetofend(typeof(*resp), cap_flags) && in user_qp_has_sdb()
813 udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr)); in user_qp_has_sdb()
821 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in user_qp_has_rdb()
822 udata->outlen >= offsetofend(typeof(*resp), cap_flags) && in user_qp_has_rdb()
829 return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) && in kernel_qp_has_rdb()
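
The user_qp_has_sdb()/user_qp_has_rdb() predicates gate an optional doorbell feature on three conditions: the device capability flag, a response buffer large enough to report cap_flags back, and a command buffer large enough to carry the needed address. offsetofend() is what keeps those length checks compatible with older and newer ABI structs; a minimal illustration with made-up command/response layouts:

#include <linux/stddef.h>
#include <linux/types.h>

struct my_create_cmd {          /* hypothetical userspace command layout */
        __aligned_u64 buf_addr;
        __aligned_u64 db_addr;
};

struct my_create_resp {         /* hypothetical response layout */
        __u32 cap_flags;
        __u32 reserved;
};

/* True only if userspace passed structs new enough to hold both fields. */
static bool my_udata_has_db(size_t inlen, size_t outlen)
{
        return outlen >= offsetofend(struct my_create_resp, cap_flags) &&
               inlen >= offsetofend(struct my_create_cmd, db_addr);
}
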
844 address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE; in qp_mmap_entry()
846 hr_qp->dwqe_mmap_entry = in qp_mmap_entry()
847 hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address, in qp_mmap_entry()
851 if (!hr_qp->dwqe_mmap_entry) { in qp_mmap_entry()
852 ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n"); in qp_mmap_entry()
853 return -ENOMEM; in qp_mmap_entry()
856 rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry; in qp_mmap_entry()
857 resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry); in qp_mmap_entry()
873 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_user_qp_db()
877 ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb); in alloc_user_qp_db()
884 hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; in alloc_user_qp_db()
888 ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb); in alloc_user_qp_db()
895 hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; in alloc_user_qp_db()
902 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); in alloc_user_qp_db()
911 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_kernel_qp_db()
914 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in alloc_kernel_qp_db()
915 hr_qp->sq.db_reg = hr_dev->mem_base + in alloc_kernel_qp_db()
916 HNS_ROCE_DWQE_SIZE * hr_qp->qpn; in alloc_kernel_qp_db()
918 hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset + in alloc_kernel_qp_db()
919 DB_REG_OFFSET * hr_dev->priv_uar.index; in alloc_kernel_qp_db()
921 hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset + in alloc_kernel_qp_db()
922 DB_REG_OFFSET * hr_dev->priv_uar.index; in alloc_kernel_qp_db()
925 ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); in alloc_kernel_qp_db()
932 *hr_qp->rdb.db_record = 0; in alloc_kernel_qp_db()
933 hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB; in alloc_kernel_qp_db()
947 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE) in alloc_qp_db()
948 hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB; in alloc_qp_db()
951 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) { in alloc_qp_db()
970 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) in alloc_qp_db()
983 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) in free_qp_db()
984 hns_roce_db_unmap_user(uctx, &hr_qp->rdb); in free_qp_db()
985 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) in free_qp_db()
986 hns_roce_db_unmap_user(uctx, &hr_qp->sdb); in free_qp_db()
987 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) in free_qp_db()
990 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) in free_qp_db()
991 hns_roce_free_db(hr_dev, &hr_qp->rdb); in free_qp_db()
998 struct ib_device *ibdev = &hr_dev->ib_dev; in alloc_kernel_wrid()
1003 sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); in alloc_kernel_wrid()
1006 return -ENOMEM; in alloc_kernel_wrid()
1009 if (hr_qp->rq.wqe_cnt) { in alloc_kernel_wrid()
1010 rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); in alloc_kernel_wrid()
1013 ret = -ENOMEM; in alloc_kernel_wrid()
1018 hr_qp->sq.wrid = sq_wrid; in alloc_kernel_wrid()
1019 hr_qp->rq.wrid = rq_wrid; in alloc_kernel_wrid()
1029 kfree(hr_qp->rq.wrid); in free_kernel_wrid()
1030 kfree(hr_qp->sq.wrid); in free_kernel_wrid()
1036 if (hr_qp->ibqp.qp_type == IB_QPT_UD || in default_congest_type()
1037 hr_qp->ibqp.qp_type == IB_QPT_GSI) in default_congest_type()
1038 hr_qp->cong_type = CONG_TYPE_DCQCN; in default_congest_type()
1040 hr_qp->cong_type = hr_dev->caps.default_cong_type; in default_congest_type()
1046 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); in set_congest_type()
1048 switch (ucmd->cong_type_flags) { in set_congest_type()
1050 hr_qp->cong_type = CONG_TYPE_DCQCN; in set_congest_type()
1053 hr_qp->cong_type = CONG_TYPE_LDCP; in set_congest_type()
1056 hr_qp->cong_type = CONG_TYPE_HC3; in set_congest_type()
1059 hr_qp->cong_type = CONG_TYPE_DIP; in set_congest_type()
1062 return -EINVAL; in set_congest_type()
1065 if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap)) in set_congest_type()
1066 return -EOPNOTSUPP; in set_congest_type()
1068 if (hr_qp->ibqp.qp_type == IB_QPT_UD && in set_congest_type()
1069 hr_qp->cong_type != CONG_TYPE_DCQCN) in set_congest_type()
1070 return -EOPNOTSUPP; in set_congest_type()
1079 if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE) in set_congest_param()
1092 struct ib_device *ibdev = &hr_dev->ib_dev; in set_qp_param()
1096 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) in set_qp_param()
1097 hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; in set_qp_param()
1099 hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; in set_qp_param()
1101 ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp, in set_qp_param()
1111 min(udata->inlen, sizeof(*ucmd))); in set_qp_param()
1120 hr_qp->config = uctx->config; in set_qp_param()
1121 ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); in set_qp_param()
1131 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in set_qp_param()
1132 hr_qp->config = HNS_ROCE_EXSGE_FLAGS; in set_qp_param()
1134 ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); in set_qp_param()
1149 struct hns_roce_work *flush_work = &hr_qp->flush_work; in hns_roce_create_qp_common()
1151 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_create_qp_common()
1155 mutex_init(&hr_qp->mutex); in hns_roce_create_qp_common()
1156 spin_lock_init(&hr_qp->sq.lock); in hns_roce_create_qp_common()
1157 spin_lock_init(&hr_qp->rq.lock); in hns_roce_create_qp_common()
1158 spin_lock_init(&hr_qp->flush_lock); in hns_roce_create_qp_common()
1160 hr_qp->state = IB_QPS_RESET; in hns_roce_create_qp_common()
1161 hr_qp->flush_flag = 0; in hns_roce_create_qp_common()
1162 flush_work->hr_dev = hr_dev; in hns_roce_create_qp_common()
1163 INIT_WORK(&flush_work->work, flush_work_handle); in hns_roce_create_qp_common()
1165 if (init_attr->create_flags) in hns_roce_create_qp_common()
1166 return -EOPNOTSUPP; in hns_roce_create_qp_common()
1216 resp.cap_flags = hr_qp->en_flags; in hns_roce_create_qp_common()
1218 min(udata->outlen, sizeof(resp))); in hns_roce_create_qp_common()
1225 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { in hns_roce_create_qp_common()
1226 ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); in hns_roce_create_qp_common()
1231 hr_qp->ibqp.qp_num = hr_qp->qpn; in hns_roce_create_qp_common()
1232 hr_qp->event = hns_roce_ib_qp_event; in hns_roce_create_qp_common()
1233 refcount_set(&hr_qp->refcount, 1); in hns_roce_create_qp_common()
1234 init_completion(&hr_qp->free); in hns_roce_create_qp_common()
1251 mutex_destroy(&hr_qp->mutex); in hns_roce_create_qp_common()
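
Here, and again in hns_roce_modify_qp() further down, the response is copied back with min(udata->outlen, sizeof(resp)) so older userspace with a shorter response struct keeps working. A sketch of that copy, assuming the standard ib_copy_to_udata() helper and a hypothetical response layout:

#include <rdma/ib_verbs.h>

struct my_resp {
        __u32 cap_flags;
        __u32 reserved;
};

static int my_copy_resp(struct ib_udata *udata, u32 cap_flags)
{
        struct my_resp resp = {};

        resp.cap_flags = cap_flags;

        /* copy no more than userspace said it can hold */
        return ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
}
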
1258 if (refcount_dec_and_test(&hr_qp->refcount)) in hns_roce_qp_destroy()
1259 complete(&hr_qp->free); in hns_roce_qp_destroy()
1260 wait_for_completion(&hr_qp->free); in hns_roce_qp_destroy()
1267 mutex_destroy(&hr_qp->mutex); in hns_roce_qp_destroy()
1276 if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)) in check_qp_type()
1280 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && in check_qp_type()
1294 ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type); in check_qp_type()
1296 return -EOPNOTSUPP; in check_qp_type()
1302 struct ib_device *ibdev = qp->device; in hns_roce_create_qp()
1307 ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); in hns_roce_create_qp()
1311 if (init_attr->qp_type == IB_QPT_XRC_TGT) in hns_roce_create_qp()
1312 hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn; in hns_roce_create_qp()
1314 if (init_attr->qp_type == IB_QPT_GSI) { in hns_roce_create_qp()
1315 hr_qp->port = init_attr->port_num - 1; in hns_roce_create_qp()
1316 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; in hns_roce_create_qp()
1322 init_attr->qp_type, ret); in hns_roce_create_qp()
1326 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]); in hns_roce_create_qp()
1343 return -1; in to_hr_qp_type()
1354 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; in check_mtu_validate()
1355 active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu); in check_mtu_validate()
1357 if ((hr_dev->caps.max_mtu >= IB_MTU_2048 && in check_mtu_validate()
1358 attr->path_mtu > hr_dev->caps.max_mtu) || in check_mtu_validate()
1359 attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) { in check_mtu_validate()
1360 ibdev_err(&hr_dev->ib_dev, in check_mtu_validate()
1362 attr->path_mtu); in check_mtu_validate()
1363 return -EINVAL; in check_mtu_validate()
1372 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_check_qp_attr()
1377 (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { in hns_roce_check_qp_attr()
1378 ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n", in hns_roce_check_qp_attr()
1379 attr->port_num); in hns_roce_check_qp_attr()
1380 return -EINVAL; in hns_roce_check_qp_attr()
1384 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port; in hns_roce_check_qp_attr()
1385 if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) { in hns_roce_check_qp_attr()
1386 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1388 attr->pkey_index); in hns_roce_check_qp_attr()
1389 return -EINVAL; in hns_roce_check_qp_attr()
1394 attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) { in hns_roce_check_qp_attr()
1395 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1397 attr->max_rd_atomic); in hns_roce_check_qp_attr()
1398 return -EINVAL; in hns_roce_check_qp_attr()
1402 attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) { in hns_roce_check_qp_attr()
1403 ibdev_err(&hr_dev->ib_dev, in hns_roce_check_qp_attr()
1405 attr->max_dest_rd_atomic); in hns_roce_check_qp_attr()
1406 return -EINVAL; in hns_roce_check_qp_attr()
1418 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_modify_qp()
1422 int ret = -EINVAL; in hns_roce_modify_qp()
1424 mutex_lock(&hr_qp->mutex); in hns_roce_modify_qp()
1426 if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state) in hns_roce_modify_qp()
1429 cur_state = hr_qp->state; in hns_roce_modify_qp()
1430 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; in hns_roce_modify_qp()
1432 if (ibqp->uobject && in hns_roce_modify_qp()
1434 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) { in hns_roce_modify_qp()
1435 hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); in hns_roce_modify_qp()
1437 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) in hns_roce_modify_qp()
1438 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); in hns_roce_modify_qp()
1440 ibdev_warn(&hr_dev->ib_dev, in hns_roce_modify_qp()
1446 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, in hns_roce_modify_qp()
1448 ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n"); in hns_roce_modify_qp()
1459 ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state, in hns_roce_modify_qp()
1464 if (udata && udata->outlen) { in hns_roce_modify_qp()
1465 resp.tc_mode = hr_qp->tc_mode; in hns_roce_modify_qp()
1466 resp.priority = hr_qp->sl; in hns_roce_modify_qp()
1468 min(udata->outlen, sizeof(resp))); in hns_roce_modify_qp()
1470 ibdev_err_ratelimited(&hr_dev->ib_dev, in hns_roce_modify_qp()
1475 mutex_unlock(&hr_qp->mutex); in hns_roce_modify_qp()
1477 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]); in hns_roce_modify_qp()
1483 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in hns_roce_lock_cqs()
1486 __acquire(&send_cq->lock); in hns_roce_lock_cqs()
1487 __acquire(&recv_cq->lock); in hns_roce_lock_cqs()
1489 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1490 __acquire(&recv_cq->lock); in hns_roce_lock_cqs()
1492 spin_lock(&recv_cq->lock); in hns_roce_lock_cqs()
1493 __acquire(&send_cq->lock); in hns_roce_lock_cqs()
1495 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1496 __acquire(&recv_cq->lock); in hns_roce_lock_cqs()
1497 } else if (send_cq->cqn < recv_cq->cqn) { in hns_roce_lock_cqs()
1498 spin_lock(&send_cq->lock); in hns_roce_lock_cqs()
1499 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); in hns_roce_lock_cqs()
1501 spin_lock(&recv_cq->lock); in hns_roce_lock_cqs()
1502 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); in hns_roce_lock_cqs()
1507 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock) in hns_roce_unlock_cqs()
1508 __releases(&recv_cq->lock) in hns_roce_unlock_cqs()
1511 __release(&recv_cq->lock); in hns_roce_unlock_cqs()
1512 __release(&send_cq->lock); in hns_roce_unlock_cqs()
1514 __release(&recv_cq->lock); in hns_roce_unlock_cqs()
1515 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1517 __release(&send_cq->lock); in hns_roce_unlock_cqs()
1518 spin_unlock(&recv_cq->lock); in hns_roce_unlock_cqs()
1520 __release(&recv_cq->lock); in hns_roce_unlock_cqs()
1521 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1522 } else if (send_cq->cqn < recv_cq->cqn) { in hns_roce_unlock_cqs()
1523 spin_unlock(&recv_cq->lock); in hns_roce_unlock_cqs()
1524 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1526 spin_unlock(&send_cq->lock); in hns_roce_unlock_cqs()
1527 spin_unlock(&recv_cq->lock); in hns_roce_unlock_cqs()
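
hns_roce_lock_cqs()/hns_roce_unlock_cqs() above apply the classic two-lock ordering rule: when the send and receive CQs differ, always take the lower-CQN lock first and release in reverse order, so two paths locking the same pair can never deadlock; identical CQs take a single lock, and the __acquire()/__release() calls only keep sparse happy. A reduced sketch of the ordering rule without the NULL-CQ handling or sparse annotations:

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_cq {
        u32 cqn;
        spinlock_t lock;
};

static void my_lock_cq_pair(struct my_cq *a, struct my_cq *b)
{
        if (a == b) {
                spin_lock(&a->lock);
        } else if (a->cqn < b->cqn) {
                spin_lock(&a->lock);
                spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(&b->lock);
                spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
        }
}

static void my_unlock_cq_pair(struct my_cq *a, struct my_cq *b)
{
        if (a == b) {
                spin_unlock(&a->lock);
        } else if (a->cqn < b->cqn) {
                spin_unlock(&b->lock); /* release in reverse order */
                spin_unlock(&a->lock);
        } else {
                spin_unlock(&a->lock);
                spin_unlock(&b->lock);
        }
}
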
1533 return hns_roce_buf_offset(hr_qp->mtr.kmem, offset); in get_wqe()
1538 return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift)); in hns_roce_get_recv_wqe()
1543 return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift)); in hns_roce_get_send_wqe()
1548 return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift)); in hns_roce_get_extend_sge()
1557 cur = hr_wq->head - hr_wq->tail; in hns_roce_wq_overflow()
1558 if (likely(cur + nreq < hr_wq->wqe_cnt)) in hns_roce_wq_overflow()
1562 spin_lock(&hr_cq->lock); in hns_roce_wq_overflow()
1563 cur = hr_wq->head - hr_wq->tail; in hns_roce_wq_overflow()
1564 spin_unlock(&hr_cq->lock); in hns_roce_wq_overflow()
1566 return cur + nreq >= hr_wq->wqe_cnt; in hns_roce_wq_overflow()
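
hns_roce_wq_overflow() relies on unsigned wraparound: head and tail are free-running counters, so head - tail is the number of outstanding WQEs even after either counter wraps, and the ring is full once that count plus the new requests reaches wqe_cnt. A standalone sketch of the check:

#include <linux/types.h>

struct my_wq {
        u32 head;       /* free-running producer counter */
        u32 tail;       /* free-running consumer counter */
        u32 wqe_cnt;    /* ring size, a power of two */
};

/* Returns true if posting nreq more WQEs would overflow the ring. */
static bool my_wq_overflow(const struct my_wq *wq, u32 nreq)
{
        u32 cur = wq->head - wq->tail;  /* valid across counter wraparound */

        return cur + nreq >= wq->wqe_cnt;
}

The driver repeats the same computation under the CQ lock to pick up completions that raced with the unlocked fast path.
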
1571 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in hns_roce_init_qp_table()
1575 mutex_init(&qp_table->scc_mutex); in hns_roce_init_qp_table()
1576 mutex_init(&qp_table->bank_mutex); in hns_roce_init_qp_table()
1577 xa_init(&hr_dev->qp_table_xa); in hns_roce_init_qp_table()
1578 xa_init(&qp_table->dip_xa); in hns_roce_init_qp_table()
1580 reserved_from_bot = hr_dev->caps.reserved_qps; in hns_roce_init_qp_table()
1583 hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++; in hns_roce_init_qp_table()
1584 hr_dev->qp_table.bank[get_qp_bankid(i)].min++; in hns_roce_init_qp_table()
1588 ida_init(&hr_dev->qp_table.bank[i].ida); in hns_roce_init_qp_table()
1589 hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps / in hns_roce_init_qp_table()
1590 HNS_ROCE_QP_BANK_NUM - 1; in hns_roce_init_qp_table()
1591 hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min; in hns_roce_init_qp_table()
1602 ida_destroy(&hr_dev->qp_table.bank[i].ida); in hns_roce_cleanup_qp_table()
1603 xa_destroy(&hr_dev->qp_table.dip_xa); in hns_roce_cleanup_qp_table()
1604 xa_destroy(&hr_dev->qp_table_xa); in hns_roce_cleanup_qp_table()
1605 mutex_destroy(&hr_dev->qp_table.bank_mutex); in hns_roce_cleanup_qp_table()
1606 mutex_destroy(&hr_dev->qp_table.scc_mutex); in hns_roce_cleanup_qp_table()