Lines Matching +full:cfg +full:- +full:space
15 * - Redistributions of source code must retain the above
19 * - Redistributions in binary form must reproduce the above
55 struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
56 struct ib_device *ibdev = &hr_dev->ib_dev;
61 id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
65 return -ENOMEM;
68 mr->key = hw_index_to_key(id); /* MR key */
70 err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
79 ida_free(&mtpt_ida->ida, id);
85 unsigned long obj = key_to_hw_index(mr->key);
87 hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
88 ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
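
The helpers hw_index_to_key() and key_to_hw_index() used at lines 68 and 85 are not among the matched lines; the usual scheme in this driver family rotates the table index by one byte so the low byte of the lkey/rkey stays available as a key tag. A minimal standalone sketch of that round trip (the exact shift amounts are an assumption, not taken from this listing):

    #include <assert.h>
    #include <stdint.h>

    /* assumed rotation: the MTPT index (at most 24 bits) is rotated left by 8 bits */
    static uint32_t hw_index_to_key(uint32_t ind)
    {
            return (ind >> 24) | (ind << 8);
    }

    static uint32_t key_to_hw_index(uint32_t key)
    {
            return (key << 24) | (key >> 8);
    }

    int main(void)
    {
            uint32_t id = 0x1234;   /* index handed out by ida_alloc_range() */

            assert(key_to_hw_index(hw_index_to_key(id)) == id);
            return 0;
    }
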
94 struct ib_device *ibdev = &hr_dev->ib_dev;
95 bool is_fast = mr->type == MR_TYPE_FRMR;
99 mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
101 hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
102 buf_attr.region[0].size = mr->size;
103 buf_attr.region[0].hopnum = mr->pbl_hop_num;
105 buf_attr.user_access = mr->access;
108 buf_attr.iova = mr->iova;
113 err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
114 hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
121 mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
122 mr->pbl_hop_num = buf_attr.region[0].hopnum;
129 hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
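
The page shifts built above (pbl_buf_pg_sz + PAGE_SHIFT at line 101 for the buffer and pbl_ba_pg_sz + PAGE_SHIFT at line 114 for the base-address table) encode the hardware page size as an extra shift on top of the kernel page shift. A small sketch of that arithmetic, assuming 4 KB kernel pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KB kernel pages */

    int main(void)
    {
            unsigned int pbl_buf_pg_sz = 2; /* example capability value */
            unsigned int page_shift = pbl_buf_pg_sz + PAGE_SHIFT;

            /* a capability of 2 on top of 4 KB pages gives 16 KB PBL buffer pages */
            printf("%lu bytes per PBL buffer page\n", 1UL << page_shift);
            return 0;
    }
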
134 struct ib_device *ibdev = &hr_dev->ib_dev;
137 if (mr->enabled) {
139 key_to_hw_index(mr->key) &
140 (hr_dev->caps.num_mtpts - 1));
153 unsigned long mtpt_idx = key_to_hw_index(mr->key);
155 struct device *dev = hr_dev->dev;
164 if (mr->type != MR_TYPE_FRMR)
165 ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
167 ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
174 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
180 mr->enabled = 1;
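
The masking at lines 139-140 and 174, key_to_hw_index(mr->key) & (num_mtpts - 1), is only equivalent to a modulo when the MTPT count is a power of two, which is assumed of the capability here. A tiny check of that equivalence:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t num_mtpts = 0x10000;   /* assumed power-of-two capability */
            uint32_t hw_index = 0x12345;    /* index recovered from an MR key */

            /* with a power-of-two table size, masking equals taking the modulo */
            assert((hw_index & (num_mtpts - 1)) == hw_index % num_mtpts);
            return 0;
    }
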
190 struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
192 ida_init(&mtpt_ida->ida);
193 mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
194 mtpt_ida->min = hr_dev->caps.reserved_mrws;
199 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
205 return ERR_PTR(-ENOMEM);
207 mr->type = MR_TYPE_DMA;
208 mr->pd = to_hr_pd(pd)->pdn;
209 mr->access = acc;
212 hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
221 mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
223 return &mr->ibmr;
237 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
242 ret = -EOPNOTSUPP;
248 ret = -ENOMEM;
252 mr->iova = virt_addr;
253 mr->size = length;
254 mr->pd = to_hr_pd(pd)->pdn;
255 mr->access = access_flags;
256 mr->type = MR_TYPE_MR;
270 mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
272 return &mr->ibmr;
281 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REG_ERR_CNT]);
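
For context, the registration path above is what services a userspace ibv_reg_mr() call on hns hardware. A rough libibverbs caller, with error handling trimmed, might look like this:

    #include <stdio.h>
    #include <stdlib.h>
    #include <infiniband/verbs.h>

    int main(void)
    {
            struct ibv_device **devs = ibv_get_device_list(NULL);
            struct ibv_context *ctx;
            struct ibv_pd *pd;
            struct ibv_mr *mr;
            void *buf = malloc(4096);

            if (!devs || !devs[0] || !buf)
                    return 1;
            ctx = ibv_open_device(devs[0]);
            if (!ctx)
                    return 1;
            pd = ibv_alloc_pd(ctx);
            if (!pd)
                    return 1;

            /* on hns hardware this request ends up in hns_roce_reg_user_mr() */
            mr = ibv_reg_mr(pd, buf, 4096,
                            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ);
            if (!mr)
                    return 1;
            printf("lkey 0x%x rkey 0x%x\n", mr->lkey, mr->rkey);

            ibv_dereg_mr(mr);               /* serviced by hns_roce_dereg_mr() */
            ibv_dealloc_pd(pd);
            ibv_close_device(ctx);
            ibv_free_device_list(devs);
            free(buf);
            return 0;
    }
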
291 struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
292 struct ib_device *ib_dev = &hr_dev->ib_dev;
298 if (!mr->enabled) {
299 ret = -EINVAL;
308 mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
310 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
320 mr->enabled = 0;
321 mr->iova = virt_addr;
322 mr->size = length;
325 mr->pd = to_hr_pd(pd)->pdn;
328 mr->access = mr_access_flags;
340 ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
353 mr->enabled = 1;
360 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REREG_ERR_CNT]);
369 struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
372 if (hr_dev->hw->dereg_mr)
373 hr_dev->hw->dereg_mr(hr_dev);
384 struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
385 struct device *dev = hr_dev->dev;
390 return ERR_PTR(-EINVAL);
395 return ERR_PTR(-EINVAL);
400 return ERR_PTR(-ENOMEM);
402 mr->type = MR_TYPE_FRMR;
403 mr->pd = to_hr_pd(pd)->pdn;
404 mr->size = max_num_sg * (1 << PAGE_SHIFT);
419 mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
420 mr->ibmr.length = mr->size;
422 return &mr->ibmr;
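
mr->size = max_num_sg * (1 << PAGE_SHIFT) at line 404 bounds what a fast-register MR can cover: with 4 KB pages, 128 SGEs cap the MR at 512 KB. The arithmetic, with PAGE_SHIFT assumed to be 12:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KB kernel pages */

    int main(void)
    {
            unsigned int max_num_sg = 128;  /* example value passed to ib_alloc_mr() */

            printf("FRMR covers at most %lu bytes\n",
                   (unsigned long)max_num_sg * (1UL << PAGE_SHIFT));
            return 0;
    }
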
437 if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
438 mr->page_list[mr->npages++] = addr;
442 return -ENOBUFS;
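
hns_roce_set_page() above is the per-page callback the RDMA core invokes while walking a scatterlist: store one DMA address, and report -ENOBUFS once the page list is full. A standalone imitation of that pattern (fake_mr and fake_set_page are hypothetical stand-ins, not driver code):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-in for the MR state the callback fills */
    struct fake_mr {
            uint64_t page_list[4];
            unsigned int npages;
            unsigned int capacity;
    };

    /* store one page address, fail once the page list is full */
    static int fake_set_page(struct fake_mr *mr, uint64_t addr)
    {
            if (mr->npages < mr->capacity) {
                    mr->page_list[mr->npages++] = addr;
                    return 0;
            }
            return -ENOBUFS;
    }

    int main(void)
    {
            struct fake_mr mr = { .capacity = 4 };
            uint64_t dma = 0x100000;
            int i;

            for (i = 0; i < 5; i++)
                    printf("page %d -> %d\n", i, fake_set_page(&mr, dma + i * 4096));
            return 0;       /* the fifth call reports -ENOBUFS */
    }
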
449 struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
450 struct ib_device *ibdev = &hr_dev->ib_dev;
452 struct hns_roce_mtr *mtr = &mr->pbl_mtr;
456 ibmr->page_size < HNS_HW_PAGE_SIZE ||
457 ibmr->page_size > HNS_HW_MAX_PAGE_SIZE)
460 mr->npages = 0;
461 mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
463 if (!mr->page_list)
469 mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
473 mtr->hem_cfg.region[0].offset = 0;
474 mtr->hem_cfg.region[0].count = mr->npages;
475 mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
476 mtr->hem_cfg.region_count = 1;
477 ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
482 mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
486 kvfree(mr->page_list);
487 mr->page_list = NULL;
502 offset = region->offset;
503 end = offset + region->count;
507 mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
510 return -ENOBUFS;
528 for (i = 0; i < attr->region_count; i++)
529 if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
530 attr->region[i].hopnum > 0)
535 * memory must be in a contiguous space accessed by direct mode.
545 for (i = 0; i < attr->region_count; i++)
546 size += attr->region[i].size;
552 * check that the given pages are in a contiguous address space
562 if (pages[i] - pages[i - 1] != page_size)
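
The check at line 562 rejects direct (0-hop) mapping as soon as two neighbouring pages are not exactly one page apart. A standalone version of the same idea (not the driver's code):

    #include <stdint.h>
    #include <stdio.h>

    /* return 0 if all pages are contiguous, else the index of the first break */
    static int check_direct_pages(const uint64_t *pages, int count, uint64_t page_size)
    {
            int i;

            for (i = 1; i < count; i++)
                    if (pages[i] - pages[i - 1] != page_size)
                            return i;
            return 0;
    }

    int main(void)
    {
            uint64_t ok[]  = { 0x1000, 0x2000, 0x3000 };
            uint64_t bad[] = { 0x1000, 0x2000, 0x5000 };

            /* prints "0 2": the second list breaks at index 2 */
            printf("%d %d\n", check_direct_pages(ok, 3, 0x1000),
                   check_direct_pages(bad, 3, 0x1000));
            return 0;
    }
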
571 if (mtr->umem) {
572 ib_umem_release(mtr->umem);
573 mtr->umem = NULL;
577 if (mtr->kmem) {
578 hns_roce_buf_free(hr_dev, mtr->kmem);
579 mtr->kmem = NULL;
587 struct ib_device *ibdev = &hr_dev->ib_dev;
593 mtr->kmem = NULL;
594 mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
595 buf_attr->user_access);
596 if (IS_ERR(mtr->umem)) {
598 PTR_ERR(mtr->umem));
599 return -ENOMEM;
602 mtr->umem = NULL;
603 mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
604 buf_attr->page_shift,
607 if (IS_ERR(mtr->kmem)) {
609 PTR_ERR(mtr->kmem));
610 return PTR_ERR(mtr->kmem);
623 for (i = 0; i < mtr->hem_cfg.region_count; i++) {
624 region = &mtr->hem_cfg.region[i];
625 page_cnt += region->count;
633 /* When HEM buffer uses 0-level addressing, the page size is
638 return mtr->hem_cfg.is_direct && mtr->hem_cfg.region_count > 1;
643 struct ib_device *ibdev = &hr_dev->ib_dev;
651 mtr->hem_cfg.buf_pg_shift;
655 return -ENOMEM;
657 if (mtr->umem)
659 mtr->umem, page_shift);
662 mtr->kmem, page_shift);
667 ret = -ENOBUFS;
675 mtr->umem ? "umtr" : "kmtr", ret, npage);
676 ret = -ENOBUFS;
694 struct ib_device *ibdev = &hr_dev->ib_dev;
703 if (mtr->hem_cfg.is_direct) {
704 mtr->hem_cfg.root_ba = pages[0];
708 for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
710 r = &mtr->hem_cfg.region[i];
712 if (r->offset + r->count > page_cnt) {
713 ret = -EINVAL;
716 i, r->offset, r->count, page_cnt);
720 ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
721 page_cnt - mapped_cnt);
725 i, r->offset, ret);
733 ret = -ENOBUFS;
741 static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
750 if (mtt_cnt > cfg->region_count)
751 return -EINVAL;
753 for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
755 npage = cfg->region[mtt_count].offset;
759 addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
766 return -ENOENT;
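
Line 759 shows the whole point of direct (0-hop) addressing: there is no table walk, the MTT address is just the root base address plus a hardware-page offset. A small sketch of that computation, with HNS_HW_PAGE_SHIFT assumed to be 12:

    #include <stdint.h>
    #include <stdio.h>

    #define HNS_HW_PAGE_SHIFT 12    /* assumed 4 KB hardware pages */

    int main(void)
    {
            uint64_t root_ba = 0x80000000;  /* example base of the direct buffer */
            uint64_t npage = 5;             /* hardware-page offset into the buffer */

            /* prints 0x80005000: base + (page index << page shift) */
            printf("0x%llx\n",
                   (unsigned long long)(root_ba + (npage << HNS_HW_PAGE_SHIFT)));
            return 0;
    }
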
783 mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
790 left -= npage;
796 return -ENOENT;
804 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
809 return -EINVAL;
812 if (cfg->is_direct) {
814 ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
817 start_index = offset >> cfg->buf_pg_shift;
830 if (!buf_attr->adaptive || buf_attr->type != MTR_PBL || !mtr->umem)
833 page_sz = ib_umem_find_best_pgsz(mtr->umem,
834 hr_dev->caps.page_size_cap,
835 buf_attr->iova);
837 return -EINVAL;
839 buf_attr->page_shift = order_base_2(page_sz);
848 #define INVALID_HOPNUM -1
850 size_t buf_pg_sz = 1 << buf_attr->page_shift;
851 struct ib_device *ibdev = &hr_dev->ib_dev;
858 if (!buf_attr->adaptive || buf_attr->type != MTR_PBL)
862 if (mtr->umem)
863 ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
865 ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz);
879 return -EINVAL;
882 buf_attr->region[0].hopnum = hop_num;
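
The hop-number selection above counts how many base addresses (ba_cnt) the buffer needs and then picks the shallowest table that can hold them. A simplified sketch of that idea (not the driver's exact policy; the 8-byte BA entry size is an assumption):

    #include <stdio.h>

    #define BA_BYTE_LEN 8   /* assumed size of one base-address entry */

    /* smallest hop number whose table can address ba_cnt buffer pages */
    static int best_hop_num(unsigned long ba_cnt, unsigned long bt_page_size,
                            int max_hop)
    {
            unsigned long ba_per_bt = bt_page_size / BA_BYTE_LEN;
            unsigned long capacity = 1;
            int hop;

            for (hop = 0; hop <= max_hop; hop++) {
                    if (ba_cnt <= capacity)
                            return hop;
                    capacity *= ba_per_bt;
            }
            return -1;      /* no hop number is large enough */
    }

    int main(void)
    {
            /* 4 KB BT pages hold 512 BAs: prints "0 1 2" */
            printf("%d %d %d\n", best_hop_num(1, 4096, 3),
                   best_hop_num(300, 4096, 3), best_hop_num(200000, 4096, 3));
            return 0;
    }
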
890 struct ib_device *ibdev = &hr_dev->ib_dev;
892 if (attr->region_count > ARRAY_SIZE(attr->region) ||
893 attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) {
896 attr->region_count, attr->page_shift);
907 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
915 return -EINVAL;
918 cfg->is_direct = !mtr_has_mtt(attr);
919 cfg->region_count = attr->region_count;
923 cfg->buf_pg_count = 1;
925 cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
928 buf_pg_sz = 1 << attr->page_shift;
929 cfg->buf_pg_count = mtr->umem ?
930 ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz) :
932 cfg->buf_pg_shift = attr->page_shift;
933 pgoff = mtr->umem ? mtr->umem->address & ~PAGE_MASK : 0;
939 for (page_cnt = 0, i = 0; i < attr->region_count; i++) {
940 r = &cfg->region[i];
941 r->offset = page_cnt;
942 buf_size = hr_hw_page_align(attr->region[i].size + pgoff);
943 if (attr->type == MTR_PBL && mtr->umem)
944 r->count = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
946 r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);
949 page_cnt += r->count;
950 r->hopnum = to_hr_hem_hopnum(attr->region[i].hopnum, r->count);
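
The loop above lays the regions out back to back in units of buffer pages: each region's offset is the running page count and its count is its byte size rounded up to whole pages. A small sketch of that accumulation with made-up region sizes:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long region_size[] = { 8192, 12000, 4096 };   /* bytes */
            unsigned long buf_pg_sz = 4096;
            unsigned long offset = 0;
            int i;

            /* prints offsets 0, 2, 5 and counts 2, 3, 1 */
            for (i = 0; i < 3; i++) {
                    unsigned long count = DIV_ROUND_UP(region_size[i], buf_pg_sz);

                    printf("region %d: offset %lu, count %lu\n", i, offset, count);
                    offset += count;
            }
            return 0;
    }
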
958 return int_pow(ba_per_bt, hopnum - 1);
965 unsigned long cap = hr_dev->caps.page_size_cap;
978 for (i = 0; i < mtr->hem_cfg.region_count; i++) {
979 re = &mtr->hem_cfg.region[i];
980 if (re->hopnum == 0)
983 pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
984 ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
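
cal_pages_per_l1ba() at line 958 is the other half of the sizing math: with hopnum levels, one level-1 base address fans out to ba_per_bt^(hopnum - 1) buffer pages, so the loop above can count how many level-1 entries each region consumes. A worked example (BA entry size again assumed to be 8 bytes):

    #include <stdio.h>

    #define BA_BYTE_LEN 8   /* assumed size of one base-address entry */

    static unsigned long int_pow(unsigned long base, unsigned int exp)
    {
            unsigned long v = 1;

            while (exp--)
                    v *= base;
            return v;
    }

    int main(void)
    {
            unsigned long ba_per_bt = 4096 / BA_BYTE_LEN;   /* 512 BAs per 4 KB BT page */
            unsigned long pgs_per_l1ba = int_pow(ba_per_bt, 2 - 1); /* hopnum = 2 */
            unsigned long region_pages = 100000;
            unsigned long l1_ba_needed =
                    (region_pages + pgs_per_l1ba - 1) / pgs_per_l1ba;

            /* prints "512 196": 100000 pages need 196 level-1 entries */
            printf("%lu %lu\n", pgs_per_l1ba, l1_ba_needed);
            return 0;
    }
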
997 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
1000 hns_roce_hem_list_init(&mtr->hem_list);
1001 if (!cfg->is_direct) {
1004 return -ERANGE;
1006 ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
1007 cfg->region, cfg->region_count,
1011 cfg->root_ba = mtr->hem_list.root_ba;
1012 cfg->ba_pg_shift = ba_page_shift;
1014 cfg->ba_pg_shift = cfg->buf_pg_shift;
1022 hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
1026 * hns_roce_mtr_create - Create an hns memory translation region (MTR).
1031 * @ba_page_shift: page shift for multi-hop base address table
1032 * @udata: user-space context; NULL means the MTR is created for kernel space
1040 struct ib_device *ibdev = &hr_dev->ib_dev;
1047 if (buf_attr->mtt_only) {
1048 mtr->umem = NULL;
1049 mtr->kmem = NULL;
1077 if (buf_attr->mtt_only)
1099 /* release multi-hop addressing resources */
1100 hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
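
Taken together, the fragments above trace one lifecycle of an MTR (memory translation region). A hedged outline, with argument lists shortened; the full signatures are in the driver, not in this listing:

    /*
     * hns_roce_mtr_create(hr_dev, mtr, &buf_attr, ba_pg_shift, udata, ...)
     *     - allocates the umem/kmem buffer (unless buf_attr.mtt_only) and the
     *       multi-hop base-address tables
     * hns_roce_mtr_map(hr_dev, mtr, pages, npages)
     *     - writes the page DMA addresses into those tables, or sets root_ba
     *       directly in the 0-hop (direct) case
     * hns_roce_mtr_destroy(hr_dev, mtr)
     *     - releases the multi-hop addressing resources and the buffers
     */
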