Lines matching +full:wr +full:- +full:hold — search hits from drivers/infiniband/sw/rxe/rxe_mr.c (the Linux SoftRoCE memory-region code); each hit shows the file line number, the matched source line, and the enclosing function.
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
13 * different than the last_key. Set last_key to -1
29 switch (mr->ibmr.type) { in mr_check_range()
35 if (iova < mr->ibmr.iova || in mr_check_range()
36 iova + length > mr->ibmr.iova + mr->ibmr.length) { in mr_check_range()
38 return -EINVAL; in mr_check_range()
44 return -EINVAL; in mr_check_range()
50 u32 key = mr->elem.index << 8 | rxe_get_next_key(-1); in rxe_mr_init()
52 /* set ibmr->l/rkey and also copy into private l/rkey in rxe_mr_init()
57 mr->lkey = mr->ibmr.lkey = key; in rxe_mr_init()
58 mr->rkey = mr->ibmr.rkey = key; in rxe_mr_init()
60 mr->access = access; in rxe_mr_init()
61 mr->ibmr.page_size = PAGE_SIZE; in rxe_mr_init()
62 mr->page_mask = PAGE_MASK; in rxe_mr_init()
63 mr->page_shift = PAGE_SHIFT; in rxe_mr_init()
64 mr->state = RXE_MR_STATE_INVALID; in rxe_mr_init()
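A minimal userspace sketch (hypothetical names, not driver code) of the key layout set up here: the pool element index occupies the upper 24 bits of the lkey/rkey, and the low byte is the 8-bit variant returned by rxe_get_next_key(), so an MR can later be re-keyed without changing its pool index.

#include <stdint.h>
#include <stdio.h>

/* pack a 24-bit pool index and an 8-bit variant into one key */
static uint32_t pack_key(uint32_t pool_index, uint8_t variant)
{
	return (pool_index << 8) | variant;
}

int main(void)
{
	uint32_t key = pack_key(0x000123, 0x5a);

	printf("key     = %#x\n", key);          /* 0x1235a */
	printf("index   = %#x\n", key >> 8);     /* 0x123   */
	printf("variant = %#x\n", key & 0xff);   /* 0x5a    */
	return 0;
}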
71 mr->state = RXE_MR_STATE_VALID; in rxe_mr_init_dma()
72 mr->ibmr.type = IB_MR_TYPE_DMA; in rxe_mr_init_dma()
77 return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift); in rxe_mr_iova_to_index()
82 return iova & (mr_page_size(mr) - 1); in rxe_mr_iova_to_page_offset()
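A small sketch (stand-in struct, power-of-two page size assumed) of the translation these two helpers perform: the page index is the distance between the page containing iova and the page containing the MR's starting iova, and the page offset is just the low bits below page_shift.

#include <stdint.h>

struct demo_mr {
	uint64_t iova;           /* start of the registered range */
	unsigned int page_shift; /* log2 of the MR page size */
};

static uint64_t demo_iova_to_index(const struct demo_mr *mr, uint64_t iova)
{
	return (iova >> mr->page_shift) - (mr->iova >> mr->page_shift);
}

static uint64_t demo_iova_to_page_offset(const struct demo_mr *mr, uint64_t iova)
{
	return iova & ((UINT64_C(1) << mr->page_shift) - 1);
}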
96 XA_STATE(xas, &mr->page_list, 0); in rxe_mr_fill_pages_from_sgt()
99 bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT); in rxe_mr_fill_pages_from_sgt()
101 __sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0); in rxe_mr_fill_pages_from_sgt()
112 xas_set_err(&xas, -EINVAL); in rxe_mr_fill_pages_from_sgt()
137 xa_init(&mr->page_list); in rxe_mr_init_user()
139 umem = ib_umem_get(&rxe->ib_dev, start, length, access); in rxe_mr_init_user()
146 err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt); in rxe_mr_init_user()
152 mr->umem = umem; in rxe_mr_init_user()
153 mr->ibmr.type = IB_MR_TYPE_USER; in rxe_mr_init_user()
154 mr->state = RXE_MR_STATE_VALID; in rxe_mr_init_user()
161 XA_STATE(xas, &mr->page_list, 0); in rxe_mr_alloc()
165 xa_init(&mr->page_list); in rxe_mr_alloc()
183 mr->num_buf = num_buf; in rxe_mr_alloc()
199 mr->state = RXE_MR_STATE_FREE; in rxe_mr_init_fast()
200 mr->ibmr.type = IB_MR_TYPE_MEM_REG; in rxe_mr_init_fast()
212 bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT); in rxe_set_page()
217 return -EINVAL; in rxe_set_page()
220 if (unlikely(mr->nbuf == mr->num_buf)) in rxe_set_page()
221 return -ENOMEM; in rxe_set_page()
223 err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL)); in rxe_set_page()
227 mr->nbuf++; in rxe_set_page()
237 mr->nbuf = 0; in rxe_map_mr_sg()
238 mr->page_shift = ilog2(page_size); in rxe_map_mr_sg()
239 mr->page_mask = ~((u64)page_size - 1); in rxe_map_mr_sg()
240 mr->page_offset = mr->ibmr.iova & (page_size - 1); in rxe_map_mr_sg()
255 page = xa_load(&mr->page_list, index); in rxe_mr_copy_xarray()
257 return -EFAULT; in rxe_mr_copy_xarray()
260 mr_page_size(mr) - page_offset); in rxe_mr_copy_xarray()
270 length -= bytes; in rxe_mr_copy_xarray()
280 unsigned int page_offset = dma_addr & (PAGE_SIZE - 1); in rxe_mr_copy_dma()
288 PAGE_SIZE - page_offset); in rxe_mr_copy_dma()
300 length -= bytes; in rxe_mr_copy_dma()
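The copy paths above share one chunking pattern: clamp each step to the end of the current page, copy, then advance. A userspace sketch of that loop (plain memcpy standing in for the driver's per-page mapping and copy; fixed 4 KiB pages assumed):

#include <stddef.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096u

static void demo_copy_chunked(unsigned char *dst, const unsigned char *src,
			      size_t page_offset, size_t length)
{
	while (length > 0) {
		size_t bytes = DEMO_PAGE_SIZE - page_offset;

		if (bytes > length)
			bytes = length;

		memcpy(dst, src, bytes);	/* one page (or tail) per pass */

		dst += bytes;
		src += bytes;
		length -= bytes;
		page_offset = 0;		/* later pages start at offset 0 */
	}
}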
313 return -EINVAL; in rxe_mr_copy()
315 if (mr->ibmr.type == IB_MR_TYPE_DMA) { in rxe_mr_copy()
341 struct rxe_sge *sge = &dma->sge[dma->cur_sge]; in copy_data()
342 int offset = dma->sge_offset; in copy_data()
343 int resid = dma->resid; in copy_data()
352 err = -EINVAL; in copy_data()
356 if (sge->length && (offset < sge->length)) { in copy_data()
357 mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL); in copy_data()
359 err = -EINVAL; in copy_data()
367 if (offset >= sge->length) { in copy_data()
373 dma->cur_sge++; in copy_data()
376 if (dma->cur_sge >= dma->num_sge) { in copy_data()
377 err = -ENOSPC; in copy_data()
381 if (sge->length) { in copy_data()
382 mr = lookup_mr(pd, access, sge->lkey, in copy_data()
385 err = -EINVAL; in copy_data()
393 if (bytes > sge->length - offset) in copy_data()
394 bytes = sge->length - offset; in copy_data()
397 iova = sge->addr + offset; in copy_data()
403 resid -= bytes; in copy_data()
404 length -= bytes; in copy_data()
409 dma->sge_offset = offset; in copy_data()
410 dma->resid = resid; in copy_data()
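Both copy_data() and advance_dma_data() walk the work request's gather list the same way: consume the current SGE, move to the next one when its length is used up, and fail when the list runs out first. A compressed userspace sketch of that walk (hypothetical struct names, MR lookups and the actual copy omitted):

#include <stdint.h>
#include <errno.h>

struct demo_sge { uint64_t addr; uint32_t length; };
struct demo_dma {
	struct demo_sge *sge;
	int num_sge;
	int cur_sge;
	uint32_t sge_offset;
	uint32_t resid;
};

static int demo_advance(struct demo_dma *dma, uint32_t length)
{
	while (length) {
		struct demo_sge *sge = &dma->sge[dma->cur_sge];
		uint32_t offset = dma->sge_offset;
		uint32_t bytes;

		if (offset >= sge->length) {	/* current SGE exhausted */
			dma->cur_sge++;
			dma->sge_offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;	/* ran out of SGEs */
			continue;
		}

		bytes = sge->length - offset;
		if (bytes > length)
			bytes = length;

		dma->sge_offset = offset + bytes;
		dma->resid -= bytes;
		length -= bytes;
	}
	return 0;
}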
435 return -EINVAL; in rxe_flush_pmem_iova()
440 if (mr->ibmr.type == IB_MR_TYPE_DMA) in rxe_flush_pmem_iova()
441 return -EFAULT; in rxe_flush_pmem_iova()
449 page = xa_load(&mr->page_list, index); in rxe_flush_pmem_iova()
452 return -EFAULT; in rxe_flush_pmem_iova()
454 mr_page_size(mr) - page_offset); in rxe_flush_pmem_iova()
460 length -= bytes; in rxe_flush_pmem_iova()
479 if (unlikely(mr->state != RXE_MR_STATE_VALID)) { in rxe_mr_do_atomic_op()
484 if (mr->ibmr.type == IB_MR_TYPE_DMA) { in rxe_mr_do_atomic_op()
485 page_offset = iova & (PAGE_SIZE - 1); in rxe_mr_do_atomic_op()
498 page = xa_load(&mr->page_list, index); in rxe_mr_do_atomic_op()
535 /* See IBA oA19-28 */ in rxe_mr_do_atomic_write()
536 if (unlikely(mr->state != RXE_MR_STATE_VALID)) { in rxe_mr_do_atomic_write()
541 if (mr->ibmr.type == IB_MR_TYPE_DMA) { in rxe_mr_do_atomic_write()
542 page_offset = iova & (PAGE_SIZE - 1); in rxe_mr_do_atomic_write()
548 /* See IBA oA19-28 */ in rxe_mr_do_atomic_write()
556 page = xa_load(&mr->page_list, index); in rxe_mr_do_atomic_write()
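The IBA oA19-28 comments above guard the atomic paths: the MR must be in the valid state, and (in checks not captured by these hits) the target of the 8-byte payload must be naturally aligned. A trivial sketch of that alignment test as it would apply to the in-page offset:

#include <stdint.h>
#include <stdbool.h>

/* an 8-byte atomic payload must be naturally aligned within its page */
static bool demo_atomic_offset_ok(uint64_t page_offset)
{
	return (page_offset & 0x7) == 0;
}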
585 struct rxe_sge *sge = &dma->sge[dma->cur_sge]; in advance_dma_data()
586 int offset = dma->sge_offset; in advance_dma_data()
587 int resid = dma->resid; in advance_dma_data()
592 if (offset >= sge->length) { in advance_dma_data()
594 dma->cur_sge++; in advance_dma_data()
596 if (dma->cur_sge >= dma->num_sge) in advance_dma_data()
597 return -ENOSPC; in advance_dma_data()
602 if (bytes > sge->length - offset) in advance_dma_data()
603 bytes = sge->length - offset; in advance_dma_data()
606 resid -= bytes; in advance_dma_data()
607 length -= bytes; in advance_dma_data()
610 dma->sge_offset = offset; in advance_dma_data()
611 dma->resid = resid; in advance_dma_data()
620 struct rxe_dev *rxe = to_rdev(pd->ibpd.device); in lookup_mr()
623 mr = rxe_pool_get_index(&rxe->mr_pool, index); in lookup_mr()
627 if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) || in lookup_mr()
628 (type == RXE_LOOKUP_REMOTE && mr->rkey != key) || in lookup_mr()
629 mr_pd(mr) != pd || ((access & mr->access) != access) || in lookup_mr()
630 mr->state != RXE_MR_STATE_VALID)) { in lookup_mr()
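lookup_mr() only returns the MR if every check passes; the access test in particular requires the caller's wanted rights to be a subset of the rights the MR was registered with. A one-line sketch of that subset test:

#include <stdbool.h>

/* every requested access bit must already be set in the MR's access mask */
static bool demo_access_ok(unsigned int mr_access, unsigned int wanted)
{
	return (wanted & mr_access) == wanted;
}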
640 struct rxe_dev *rxe = to_rdev(qp->ibqp.device); in rxe_invalidate_mr()
645 mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8); in rxe_invalidate_mr()
648 ret = -EINVAL; in rxe_invalidate_mr()
652 remote = mr->access & RXE_ACCESS_REMOTE; in rxe_invalidate_mr()
653 if (remote ? (key != mr->rkey) : (key != mr->lkey)) { in rxe_invalidate_mr()
654 rxe_dbg_mr(mr, "wr key (%#x) doesn't match mr key (%#x)\n", in rxe_invalidate_mr()
655 key, (remote ? mr->rkey : mr->lkey)); in rxe_invalidate_mr()
656 ret = -EINVAL; in rxe_invalidate_mr()
660 if (atomic_read(&mr->num_mw) > 0) { in rxe_invalidate_mr()
662 ret = -EINVAL; in rxe_invalidate_mr()
666 if (unlikely(mr->ibmr.type != IB_MR_TYPE_MEM_REG)) { in rxe_invalidate_mr()
667 rxe_dbg_mr(mr, "Type (%d) is wrong\n", mr->ibmr.type); in rxe_invalidate_mr()
668 ret = -EINVAL; in rxe_invalidate_mr()
672 mr->state = RXE_MR_STATE_FREE; in rxe_invalidate_mr()
682 * user is expected to hold a reference on the ib mr until the
690 struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr); in rxe_reg_fast_mr()
691 u32 key = wqe->wr.wr.reg.key; in rxe_reg_fast_mr()
692 u32 access = wqe->wr.wr.reg.access; in rxe_reg_fast_mr()
695 if (unlikely(mr->state != RXE_MR_STATE_FREE)) { in rxe_reg_fast_mr()
696 rxe_dbg_mr(mr, "mr->lkey = 0x%x not free\n", mr->lkey); in rxe_reg_fast_mr()
697 return -EINVAL; in rxe_reg_fast_mr()
701 if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) { in rxe_reg_fast_mr()
702 rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n"); in rxe_reg_fast_mr()
703 return -EINVAL; in rxe_reg_fast_mr()
707 if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) { in rxe_reg_fast_mr()
708 rxe_dbg_mr(mr, "key = 0x%x has wrong index mr->lkey = 0x%x\n", in rxe_reg_fast_mr()
709 key, mr->lkey); in rxe_reg_fast_mr()
710 return -EINVAL; in rxe_reg_fast_mr()
713 mr->access = access; in rxe_reg_fast_mr()
714 mr->lkey = key; in rxe_reg_fast_mr()
715 mr->rkey = key; in rxe_reg_fast_mr()
716 mr->ibmr.iova = wqe->wr.wr.reg.mr->iova; in rxe_reg_fast_mr()
717 mr->state = RXE_MR_STATE_VALID; in rxe_reg_fast_mr()
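The index check a few lines up enforces the key layout established in rxe_mr_init(): a fast-register WR may pick a new 8-bit variant, but the upper 24 bits must still match the MR's pool index. A sketch of that comparison:

#include <stdint.h>
#include <stdbool.h>

/* only the low 8 "variant" bits of the key may change on re-registration */
static bool demo_key_index_matches(uint32_t cur_lkey, uint32_t new_key)
{
	return (cur_lkey & ~UINT32_C(0xff)) == (new_key & ~UINT32_C(0xff));
}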
727 ib_umem_release(mr->umem); in rxe_mr_cleanup()
729 if (mr->ibmr.type != IB_MR_TYPE_DMA) in rxe_mr_cleanup()
730 xa_destroy(&mr->page_list); in rxe_mr_cleanup()