xref: /linux/include/rdma/iter.h (revision e2683c8868d03382da7e1ce8453b543a043066d1)
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. */

#ifndef _RDMA_ITER_H_
#define _RDMA_ITER_H_

#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/**
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW-supported page size.
 */
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current aligned block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	size_t __sg_numblocks;		/* ib_umem_num_dma_blocks() */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};

void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents,
			     unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);

/**
 * rdma_block_iter_dma_address - get the aligned DMA address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
static inline dma_addr_t
rdma_block_iter_dma_address(struct ib_block_iter *biter)
{
	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
}
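
/*
 * For example (illustrative numbers): with __pg_bit == 12 (4 KiB blocks),
 * the mask is ~0xfff, so an unaligned __dma_addr of 0x101234 yields the
 * aligned block start 0x101000.
 */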

/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each
 * block's aligned DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)
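
/*
 * Example (illustrative sketch, not part of this header): collecting the
 * aligned DMA address of every 4 KiB block of an already DMA-mapped
 * sg_table. "sgt", "dev_addrs" and "max" are hypothetical driver-side
 * names.
 *
 *	struct ib_block_iter biter;
 *	unsigned int n = 0;
 *
 *	rdma_for_each_block(sgt->sgl, &biter, sgt->nents, SZ_4K) {
 *		if (n == max)
 *			break;
 *		dev_addrs[n++] = rdma_block_iter_dma_address(&biter);
 *	}
 */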

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	/*
	 * The post-decrement bounds the walk to at most
	 * ib_umem_num_dma_blocks() iterations, matching the iteration
	 * count documented for rdma_umem_for_each_dma_block().
	 */
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the memory block
 * @pgsz: page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz).
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_umem_block_iter_next(biter);)
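
/*
 * Example (illustrative sketch): populating a device page array for an MR
 * from a umem, in the way drivers commonly build translation tables.
 * "pas" is a hypothetical array with ib_umem_num_dma_blocks() entries,
 * and "pgsz_bitmap"/"virt_addr" stand in for the HW-supported page sizes
 * and the requested IOVA.
 *
 *	struct ib_block_iter biter;
 *	unsigned long pgsz;
 *	size_t i = 0;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, pgsz_bitmap, virt_addr);
 *	if (!pgsz)
 *		return -EINVAL;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */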

#endif /* _RDMA_ITER_H_ */