Lines Matching +full:protection +full:- +full:domain

The matching lines below appear to come from the SoftiWARP (siw) RDMA driver's memory-management code in the Linux kernel (drivers/infiniband/sw/siw/siw_mem.c). Each entry gives the source line number, the matched line, and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
4 /* Copyright (c) 2008-2019, IBM Corporation */
9 #include <linux/dma-mapping.h>
32 mem = xa_load(&sdev->mem_xa, stag_index); in siw_mem_id2obj()
33 if (likely(mem && kref_get_unless_zero(&mem->ref))) { in siw_mem_id2obj()
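The two fragments above are the heart of the STag lookup: the 24-bit index part of an STag keys an XArray, and an entry is handed out only if its reference count can still be raised, so an object that is concurrently being torn down is never returned. A minimal sketch of that pattern, assuming the mem_xa XArray and the ref kref visible above; the function name is illustrative, not the driver's:

struct siw_mem *lookup_sketch(struct xarray *xa, u32 stag_index)
{
	struct siw_mem *mem;

	rcu_read_lock();
	mem = xa_load(xa, stag_index);		/* no reference taken yet */
	if (mem && !kref_get_unless_zero(&mem->ref))
		mem = NULL;			/* object already on its way out */
	rcu_read_unlock();

	return mem;				/* caller drops the reference when done */
}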
44 int i, num_pages = umem->num_pages; in siw_umem_release()
46 if (umem->base_mem) in siw_umem_release()
47 ib_umem_release(umem->base_mem); in siw_umem_release()
50 kfree(umem->page_chunk[i].plist); in siw_umem_release()
51 num_pages -= PAGES_PER_CHUNK; in siw_umem_release()
53 kfree(umem->page_chunk); in siw_umem_release()
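The loop header around the two kfree() fragments above is not part of the listing; presumably one page-pointer array is freed per chunk until the page count is exhausted, roughly as in this sketch (the loop shape and the final kfree(umem) are assumptions):

for (i = 0; num_pages > 0; i++) {
	kfree(umem->page_chunk[i].plist);	/* one page-pointer array per chunk */
	num_pages -= PAGES_PER_CHUNK;
}
kfree(umem->page_chunk);			/* then the chunk array itself */
kfree(umem);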
60 struct siw_device *sdev = to_siw_dev(pd->device); in siw_mr_add_mem()
66 return -ENOMEM; in siw_mr_add_mem()
68 mem->mem_obj = mem_obj; in siw_mr_add_mem()
69 mem->stag_valid = 0; in siw_mr_add_mem()
70 mem->sdev = sdev; in siw_mr_add_mem()
71 mem->va = start; in siw_mr_add_mem()
72 mem->len = len; in siw_mr_add_mem()
73 mem->pd = pd; in siw_mr_add_mem()
74 mem->perms = rights & IWARP_ACCESS_MASK; in siw_mr_add_mem()
75 kref_init(&mem->ref); in siw_mr_add_mem()
80 if (xa_alloc_cyclic(&sdev->mem_xa, &id, mem, limit, &next, in siw_mr_add_mem()
83 return -ENOMEM; in siw_mr_add_mem()
86 mr->mem = mem; in siw_mr_add_mem()
88 mem->stag = id << 8; in siw_mr_add_mem()
89 mr->base_mr.lkey = mr->base_mr.rkey = mem->stag; in siw_mr_add_mem()
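The assignments above pin down the STag layout: the id allocated from the XArray is shifted left by eight bits, so in a 32-bit STag the upper 24 bits are the XArray index and the low byte is left free as the STag key, which is why lookups elsewhere in this listing use stag >> 8 (and sge->lkey >> 8). A hedged sketch of that split; the macro and function names are made up for illustration:

#define STAG_INDEX(stag)	((stag) >> 8)		/* 24-bit XArray index */
#define STAG_KEY(stag)		((stag) & 0xffu)	/* 8-bit key byte */

static u32 make_stag(u32 xa_id, u8 key)	/* xa_id from xa_alloc_cyclic() */
{
	return (xa_id << 8) | key;	/* registration starts out with key 0 */
}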
96 struct siw_mem *mem = mr->mem, *found; in siw_mr_drop_mem()
98 mem->stag_valid = 0; in siw_mr_drop_mem()
103 found = xa_erase(&mem->sdev->mem_xa, mem->stag >> 8); in siw_mr_drop_mem()
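The ordering above matters: the STag is marked invalid before its XArray entry is erased, so new lookups fail immediately while in-flight holders keep their counted references. A sketch of the presumed remainder of the teardown; the WARN_ON() and the final reference drop are assumptions, not part of this listing:

mem->stag_valid = 0;				/* stop new accesses first */
found = xa_erase(&mem->sdev->mem_xa, mem->stag >> 8);
WARN_ON(found != mem);				/* assumed sanity check */
siw_mem_put(mem);				/* assumed: drop the registration's
						 * reference; the last put ends up
						 * in siw_free_mem() below */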
112 siw_dbg_mem(mem, "free mem, pbl: %s\n", mem->is_pbl ? "y" : "n"); in siw_free_mem()
114 if (!mem->is_mw && mem->mem_obj) { in siw_free_mem()
115 if (mem->is_pbl == 0) in siw_free_mem()
116 siw_umem_release(mem->umem); in siw_free_mem()
118 kfree(mem->pbl); in siw_free_mem()
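The branch on mem->is_pbl above suggests a registration is backed either by pinned user pages or by a physical buffer list, never both, and that memory windows carry no backing at all. A hedged sketch of the overlay these fields imply; the field types and the struct name are assumptions, only the member names appear in the listing:

struct siw_mem_sketch {
	union {					/* backing store, selected by is_pbl */
		struct siw_umem *umem;		/* pinned user pages */
		struct siw_pbl *pbl;		/* physical buffer list */
		void *mem_obj;			/* generic handle set at registration */
	};
	u8 is_pbl;
	u8 is_mw;				/* memory windows: nothing to free */
	/* ... plus stag, stag_valid, perms, va, len, pd, sdev, ref ... */
};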
126 * Check protection domain, STAG state, access permissions and
129 * @pd: Protection Domain memory should belong to
139 if (!mem->stag_valid) { in siw_check_mem()
140 siw_dbg_pd(pd, "STag 0x%08x invalid\n", mem->stag); in siw_check_mem()
141 return -E_STAG_INVALID; in siw_check_mem()
143 if (mem->pd != pd) { in siw_check_mem()
144 siw_dbg_pd(pd, "STag 0x%08x: PD mismatch\n", mem->stag); in siw_check_mem()
145 return -E_PD_MISMATCH; in siw_check_mem()
150 if ((mem->perms & perms) < perms) { in siw_check_mem()
152 mem->perms, perms); in siw_check_mem()
153 return -E_ACCESS_PERM; in siw_check_mem()
158 if (addr < mem->va || addr + len > mem->va + mem->len) { in siw_check_mem()
164 (void *)(uintptr_t)mem->va, in siw_check_mem()
165 (void *)(uintptr_t)(mem->va + mem->len), in siw_check_mem()
166 mem->stag); in siw_check_mem()
168 return -E_BASE_BOUNDS; in siw_check_mem()
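Two of the checks above deserve spelling out: the permission test passes only if every requested access bit is already present in the registered rights, and the interval test requires the accessed range to lie fully inside the registered range. A plain-C restatement; the helper names are illustrative only:

static bool perms_ok(unsigned int registered, unsigned int requested)
{
	/* (registered & requested) < requested  <=>  some requested bit missing */
	return (registered & requested) == requested;
}

static bool range_ok(u64 va, u64 mem_len, u64 addr, u64 len)
{
	return addr >= va && addr + len <= va + mem_len;
}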
178 * @pd: Protection Domain memory should belong to
185 * NOTE: Function references SGE's memory object (mem->obj)
187 * released if check failed. If mem->obj is already valid, no new
193 struct siw_device *sdev = to_siw_dev(pd->device); in siw_check_sge()
197 if (len + off > sge->length) { in siw_check_sge()
198 rv = -E_BASE_BOUNDS; in siw_check_sge()
202 new = siw_mem_id2obj(sdev, sge->lkey >> 8); in siw_check_sge()
204 siw_dbg_pd(pd, "STag unknown: 0x%08x\n", sge->lkey); in siw_check_sge()
205 rv = -E_STAG_INVALID; in siw_check_sge()
210 /* Check if user re-registered with different STag key */ in siw_check_sge()
211 if (unlikely((*mem)->stag != sge->lkey)) { in siw_check_sge()
212 siw_dbg_mem((*mem), "STag mismatch: 0x%08x\n", sge->lkey); in siw_check_sge()
213 rv = -E_STAG_INVALID; in siw_check_sge()
216 rv = siw_check_mem(pd, *mem, sge->laddr + off, perms, len); in siw_check_sge()
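Taken together, the fragments above make *mem a per-SGE reference cache: an empty slot triggers a lookup by the lkey's index part, a stale key byte is rejected even when the index still resolves, and the final call defers to siw_check_mem(). A hedged caller-side sketch, assuming the parameter order suggested by the NOTE comment and these lines:

struct siw_mem *mem = NULL;
int rv;

rv = siw_check_sge(pd, sge, &mem, IB_ACCESS_LOCAL_WRITE, 0, sge->length);
if (rv)
	return rv;	/* any reference taken during a failed check was dropped */

/* mem now holds a counted reference, released later, e.g. via
 * siw_unref_mem_sgl() when the WQE is retired (see the next group). */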
239 if (!(wqe->sqe.flags & SIW_WQE_INLINE)) in siw_wqe_put_mem()
240 siw_unref_mem_sgl(wqe->mem, wqe->sqe.num_sge); in siw_wqe_put_mem()
244 siw_unref_mem_sgl(wqe->mem, wqe->rqe.num_sge); in siw_wqe_put_mem()
248 siw_unref_mem_sgl(wqe->mem, 1); in siw_wqe_put_mem()
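siw_unref_mem_sgl() itself is not part of this listing; presumably it walks the WQE's memory array and drops one counted reference per populated slot, along these lines (a sketch, not the driver's code):

static void unref_mem_sgl_sketch(struct siw_mem **mem, unsigned int num_sge)
{
	while (num_sge--) {
		if (!*mem)
			break;			/* trailing slots are unused */
		siw_mem_put(*mem);		/* kref_put, last put -> siw_free_mem() */
		*mem = NULL;
		mem++;
	}
}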
262 struct siw_device *sdev = to_siw_dev(pd->device); in siw_invalidate_stag()
268 return -EINVAL; in siw_invalidate_stag()
270 if (unlikely(mem->pd != pd)) { in siw_invalidate_stag()
272 rv = -EACCES; in siw_invalidate_stag()
279 mem->stag_valid = 0; in siw_invalidate_stag()
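Invalidation only clears the valid flag under the owning protection domain; the object itself survives until its references drain. A hedged sketch of the three outcomes visible in the fragments above (the success value 0 is an assumption):

rv = siw_invalidate_stag(pd, stag);
switch (rv) {
case 0:			/* STag now invalid; memory freed once the last ref drops */
	break;
case -EINVAL:		/* STag index unknown on this device */
	break;
case -EACCES:		/* memory belongs to a different protection domain */
	break;
}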
297 while (i < pbl->num_buf) { in siw_pbl_get_buffer()
298 struct siw_pble *pble = &pbl->pbe[i]; in siw_pbl_get_buffer()
300 if (pble->pbl_off + pble->size > off) { in siw_pbl_get_buffer()
301 u64 pble_off = off - pble->pbl_off; in siw_pbl_get_buffer()
304 *len = pble->size - pble_off; in siw_pbl_get_buffer()
308 return pble->addr + pble_off; in siw_pbl_get_buffer()
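The loop above resolves a linear byte offset against a list of variably sized physical buffers: the selected element is the first whose cumulative offset plus size exceeds off, and the result is that element's address plus the leftover offset, with *len reporting how many bytes remain in the element. A worked example with made-up values:

/*
 * pbe[0]: pbl_off = 0x0000, size = 0x1000, addr = A0
 * pbe[1]: pbl_off = 0x1000, size = 0x3000, addr = A1
 *
 * off = 0x1800:
 *   pbe[0] is skipped  (0x0000 + 0x1000 <= 0x1800)
 *   pbe[1] matches     (0x1000 + 0x3000 >  0x1800)
 *   pble_off = 0x1800 - 0x1000 = 0x0800
 *   *len     = 0x3000 - 0x0800 = 0x2800
 *   return     A1 + 0x0800
 */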
322 return ERR_PTR(-EINVAL); in siw_pbl_alloc()
326 return ERR_PTR(-ENOMEM); in siw_pbl_alloc()
328 pbl->max_buf = num_buf; in siw_pbl_alloc()
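A hedged sketch of the layout the PBL fields imply: a counter pair plus a flexible array of elements carrying addr, size and the cumulative pbl_off. Field types and the _sketch names are assumptions; only the member names appear in this listing:

struct siw_pble_sketch {
	dma_addr_t addr;		/* start of this physical buffer */
	u64 size;			/* length of this buffer */
	u64 pbl_off;			/* cumulative byte offset within the PBL */
};

struct siw_pbl_sketch {
	unsigned int num_buf;		/* elements filled in */
	unsigned int max_buf;		/* elements allocated by siw_pbl_alloc() */
	struct siw_pble_sketch pbe[];	/* flexible array */
};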
344 return ERR_PTR(-EINVAL); in siw_umem_get()
347 num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT; in siw_umem_get()
352 return ERR_PTR(-ENOMEM); in siw_umem_get()
354 umem->page_chunk = in siw_umem_get()
356 if (!umem->page_chunk) { in siw_umem_get()
357 rv = -ENOMEM; in siw_umem_get()
366 umem->fp_addr = first_page_va; in siw_umem_get()
367 umem->base_mem = base_mem; in siw_umem_get()
369 sgt = &base_mem->sgt_append.sgt; in siw_umem_get()
370 __sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0); in siw_umem_get()
373 rv = -EINVAL; in siw_umem_get()
382 rv = -ENOMEM; in siw_umem_get()
385 umem->page_chunk[i].plist = plist; in siw_umem_get()
386 while (nents--) { in siw_umem_get()
388 umem->num_pages++; in siw_umem_get()
389 num_pages--; in siw_umem_get()
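The sizing in siw_umem_get() is easiest to follow with numbers. Assuming 4 KiB pages and that first_page_va is the page-aligned start of the region (the alignment itself is not part of this listing), a worked example:

/*
 * start = 0x1234, len = 0x5000, PAGE_SIZE = 4 KiB (PAGE_SHIFT = 12)
 * first_page_va = start & PAGE_MASK                         = 0x1000
 * num_pages     = PAGE_ALIGN(0x1234 + 0x5000 - 0x1000) >> PAGE_SHIFT
 *               = PAGE_ALIGN(0x5234) >> 12 = 0x6000 >> 12   = 6 pages
 *
 * Page pointers are then stored PAGES_PER_CHUNK at a time: each pass of
 * the copy loop above fills one chunk's plist from the umem scatterlist
 * until num_pages reaches zero.
 */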