xref: /linux/include/crypto/scatterwalk.h (revision da6f9bf40ac267b5c720694a817beea84fa40f77)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Cryptographic scatter and gather helpers.
4  *
5  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
6  * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
7  * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
8  * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
9  */
10 
11 #ifndef _CRYPTO_SCATTERWALK_H
12 #define _CRYPTO_SCATTERWALK_H
13 
14 #include <crypto/algapi.h>
15 
16 #include <linux/highmem.h>
17 #include <linux/mm.h>
18 #include <linux/scatterlist.h>
19 
/*
 * Chain @sg onto @head (using slot @num - 1 of @head), or mark @head as the
 * end of the list when there is nothing to chain (@sg == NULL).
 */
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (!sg) {
		sg_mark_end(head);
		return;
	}
	sg_chain(head, num, sg);
}
28 
29 static inline void scatterwalk_start(struct scatter_walk *walk,
30 				     struct scatterlist *sg)
31 {
32 	walk->sg = sg;
33 	walk->offset = sg->offset;
34 }
35 
36 /*
37  * This is equivalent to scatterwalk_start(walk, sg) followed by
38  * scatterwalk_skip(walk, pos).
39  */
40 static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
41 					    struct scatterlist *sg,
42 					    unsigned int pos)
43 {
44 	while (pos > sg->length) {
45 		pos -= sg->length;
46 		sg = sg_next(sg);
47 	}
48 	walk->sg = sg;
49 	walk->offset = sg->offset + pos;
50 }
51 
/*
 * Return the number of bytes that the next walk step may process: the minimum
 * of @nbytes, the bytes remaining in the current scatterlist entry, and one
 * mapping-unit limit (see below).  If the previous step consumed the current
 * entry entirely, first advance the walk to the next entry.
 */
static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
					     unsigned int nbytes)
{
	unsigned int len_this_sg;
	unsigned int limit;

	/* Lazy advance: offset may point one past the current entry's end. */
	if (walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
	len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;

	/*
	 * HIGHMEM case: the page may have to be mapped into memory.  To avoid
	 * the complexity of having to map multiple pages at once per sg entry,
	 * clamp the returned length to not cross a page boundary.
	 *
	 * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
	 * already mapped contiguously in the kernel's direct map.  For improved
	 * performance, allow the walker to return data segments that cross a
	 * page boundary.  Do still cap the length to PAGE_SIZE, since some
	 * users rely on that to avoid disabling preemption for too long when
	 * using SIMD.  It's also needed for when skcipher_walk uses a bounce
	 * page due to the data not being aligned to the algorithm's alignmask.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		limit = PAGE_SIZE - offset_in_page(walk->offset);
	else
		limit = PAGE_SIZE;

	return min3(nbytes, len_this_sg, limit);
}
82 
83 /*
84  * Create a scatterlist that represents the remaining data in a walk.  Uses
85  * chaining to reference the original scatterlist, so this uses at most two
86  * entries in @sg_out regardless of the number of entries in the original list.
87  * Assumes that sg_init_table() was already done.
88  */
89 static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
90 					  struct scatterlist sg_out[2])
91 {
92 	if (walk->offset >= walk->sg->offset + walk->sg->length)
93 		scatterwalk_start(walk, sg_next(walk->sg));
94 	sg_set_page(sg_out, sg_page(walk->sg),
95 		    walk->sg->offset + walk->sg->length - walk->offset,
96 		    walk->offset);
97 	scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
98 }
99 
100 static inline void scatterwalk_map(struct scatter_walk *walk)
101 {
102 	struct page *base_page = sg_page(walk->sg);
103 
104 	if (IS_ENABLED(CONFIG_HIGHMEM)) {
105 		walk->__addr = kmap_local_page(base_page +
106 					       (walk->offset >> PAGE_SHIFT)) +
107 			       offset_in_page(walk->offset);
108 	} else {
109 		/*
110 		 * When !HIGHMEM we allow the walker to return segments that
111 		 * span a page boundary; see scatterwalk_clamp().  To make it
112 		 * clear that in this case we're working in the linear buffer of
113 		 * the whole sg entry in the kernel's direct map rather than
114 		 * within the mapped buffer of a single page, compute the
115 		 * address as an offset from the page_address() of the first
116 		 * page of the sg entry.  Either way the result is the address
117 		 * in the direct map, but this makes it clearer what is really
118 		 * going on.
119 		 */
120 		walk->__addr = page_address(base_page) + walk->offset;
121 	}
122 }
123 
124 /**
125  * scatterwalk_next() - Get the next data buffer in a scatterlist walk
126  * @walk: the scatter_walk
127  * @total: the total number of bytes remaining, > 0
128  *
129  * A virtual address for the next segment of data from the scatterlist will
130  * be placed into @walk->addr.  The caller must call scatterwalk_done_src()
131  * or scatterwalk_done_dst() when it is done using this virtual address.
132  *
133  * Returns: the next number of bytes available, <= @total
134  */
135 static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
136 					    unsigned int total)
137 {
138 	unsigned int nbytes = scatterwalk_clamp(walk, total);
139 
140 	scatterwalk_map(walk);
141 	return nbytes;
142 }
143 
144 static inline void scatterwalk_unmap(struct scatter_walk *walk)
145 {
146 	if (IS_ENABLED(CONFIG_HIGHMEM))
147 		kunmap_local(walk->__addr);
148 }
149 
/*
 * Advance the walk position by @nbytes.  This may leave walk->offset one past
 * the end of the current sg entry; scatterwalk_clamp() handles that lazily.
 */
static inline void scatterwalk_advance(struct scatter_walk *walk,
				       unsigned int nbytes)
{
	walk->offset += nbytes;
}
155 
156 /**
157  * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
158  * @walk: the scatter_walk
159  * @nbytes: the number of bytes processed this step, less than or equal to the
160  *	    number of bytes that scatterwalk_next() returned.
161  *
162  * Use this if the mapped address was not written to, i.e. it is source data.
163  */
164 static inline void scatterwalk_done_src(struct scatter_walk *walk,
165 					unsigned int nbytes)
166 {
167 	scatterwalk_unmap(walk);
168 	scatterwalk_advance(walk, nbytes);
169 }
170 
171 /**
172  * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
173  * @walk: the scatter_walk
174  * @nbytes: the number of bytes processed this step, less than or equal to the
175  *	    number of bytes that scatterwalk_next() returned.
176  *
177  * Use this if the mapped address may have been written to, i.e. it is
178  * destination data.
179  */
180 static inline void scatterwalk_done_dst(struct scatter_walk *walk,
181 					unsigned int nbytes)
182 {
183 	scatterwalk_unmap(walk);
184 	/*
185 	 * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
186 	 * relying on flush_dcache_page() being a no-op when not implemented,
187 	 * since otherwise the BUG_ON in sg_page() does not get optimized out.
188 	 * This also avoids having to consider whether the loop would get
189 	 * reliably optimized out or not.
190 	 */
191 	if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
192 		struct page *base_page, *start_page, *end_page, *page;
193 
194 		base_page = sg_page(walk->sg);
195 		start_page = base_page + (walk->offset >> PAGE_SHIFT);
196 		end_page = base_page + ((walk->offset + nbytes +
197 					 PAGE_SIZE - 1) >> PAGE_SHIFT);
198 		for (page = start_page; page < end_page; page++)
199 			flush_dcache_page(page);
200 	}
201 	scatterwalk_advance(walk, nbytes);
202 }
203 
/* Advance @walk by @nbytes, possibly across multiple scatterlist entries. */
void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);

/* Copy @nbytes out of the walk into @buf, advancing the walk. */
void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
			     unsigned int nbytes);

/* Copy @nbytes from @buf into the walk, advancing the walk. */
void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
			   unsigned int nbytes);

/* Copy @nbytes out of @sg, beginning @start bytes in, into @buf. */
void memcpy_from_sglist(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes);

/* Copy @nbytes from @buf into @sg, beginning @start bytes in. */
void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
		      const void *buf, unsigned int nbytes);

/* Copy @nbytes from scatterlist @src to scatterlist @dst. */
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
		   unsigned int nbytes);
220 
/*
 * Legacy copy helper: move @nbytes between @buf and @sg starting @start bytes
 * into the scatterlist.  @out selects the direction (nonzero = copy into the
 * scatterlist).  In new code, please use memcpy_{from,to}_sglist() directly
 * instead.
 */
static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
					    unsigned int start,
					    unsigned int nbytes, int out)
{
	if (!out)
		memcpy_from_sglist(buf, sg, start, nbytes);
	else
		memcpy_to_sglist(sg, start, buf, nbytes);
}
231 
/*
 * Fast-forward @len bytes into @src, returning a scatterlist (built in @dst
 * when needed) that starts at that position.
 */
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
				     struct scatterlist *src,
				     unsigned int len);
235 
236 #endif  /* _CRYPTO_SCATTERWALK_H */
237