/* drivers/infiniband/sw/siw/siw_mem.h */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_MEM_H
#define _SIW_MEM_H

struct siw_umem *siw_umem_get(struct ib_device *base_dev, u64 start,
			      u64 len, int rights);
void siw_umem_release(struct siw_umem *umem);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
		  enum ib_access_flags perms, int len);
int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge,
		  struct siw_mem *mem[], enum ib_access_flags perms,
		  u32 off, int len);
void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op);
int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
		   u64 start, u64 len, int rights);
void siw_mr_drop_mem(struct siw_mr *mr);
void siw_free_mem(struct kref *ref);

siw_mem_put(struct siw_mem * mem)28 static inline void siw_mem_put(struct siw_mem *mem)
29 {
30 	kref_put(&mem->ref, siw_free_mem);
31 }
siw_unref_mem_sgl(struct siw_mem ** mem,unsigned int num_sge)33 static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
34 {
35 	while (num_sge) {
36 		if (*mem == NULL)
37 			break;
38 
39 		siw_mem_put(*mem);
40 		*mem = NULL;
41 		mem++;
42 		num_sge--;
43 	}
44 }

#define CHUNK_SHIFT 9 /* sets number of pages per chunk */
#define PAGES_PER_CHUNK (_AC(1, UL) << CHUNK_SHIFT)
#define CHUNK_MASK (~(PAGES_PER_CHUNK - 1))
#define PAGE_CHUNK_SIZE (PAGES_PER_CHUNK * sizeof(struct page *))

/*
 * siw_get_upage()
 *
 * Get page pointer for address on given umem.
 *
 * @umem: two dimensional list of page pointers
 * @addr: user virtual address
 */
static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
{
	/* Page offset of @addr relative to the umem's first page address. */
	unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT;

	/* Out-of-range addresses yield no page. */
	if (unlikely(page_idx >= umem->num_pages))
		return NULL;

	/* Resolve via the two-level chunk table: chunk, then page in chunk. */
	return umem->page_chunk[page_idx >> CHUNK_SHIFT]
			.plist[page_idx & ~CHUNK_MASK];
}
#endif