xref: /linux/include/linux/uio.h (revision 35219bc5c71f4197c8bd10297597de797c1eece5)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  *	Berkeley style UIO structures	-	Alan Cox 1994.
4  */
5 #ifndef __LINUX_UIO_H
6 #define __LINUX_UIO_H
7 
8 #include <linux/kernel.h>
9 #include <linux/thread_info.h>
10 #include <linux/mm_types.h>
11 #include <uapi/linux/uio.h>
12 
13 struct page;
14 struct folio_queue;
15 
16 typedef unsigned int __bitwise iov_iter_extraction_t;
17 
18 struct kvec {
19 	void *iov_base; /* and that should *never* hold a userland pointer */
20 	size_t iov_len;
21 };
22 
23 enum iter_type {
24 	/* iter types */
25 	ITER_UBUF,
26 	ITER_IOVEC,
27 	ITER_BVEC,
28 	ITER_KVEC,
29 	ITER_FOLIOQ,
30 	ITER_XARRAY,
31 	ITER_DISCARD,
32 };
33 
34 #define ITER_SOURCE	1	// == WRITE
35 #define ITER_DEST	0	// == READ
36 
37 struct iov_iter_state {
38 	size_t iov_offset;
39 	size_t count;
40 	unsigned long nr_segs;
41 };
42 
43 struct iov_iter {
44 	u8 iter_type;
45 	bool nofault;
46 	bool data_source;
47 	size_t iov_offset;
48 	/*
49 	 * Hack alert: overlay ubuf_iovec with iovec + count, so
50 	 * that the members resolve correctly regardless of the type
51 	 * of iterator used. This means that you can use:
52 	 *
53 	 * &iter->__ubuf_iovec or iter->__iov
54 	 *
55 	 * interchangably for the user_backed cases, hence simplifying
56 	 * some of the cases that need to deal with both.
57 	 */
58 	union {
59 		/*
60 		 * This really should be a const, but we cannot do that without
61 		 * also modifying any of the zero-filling iter init functions.
62 		 * Leave it non-const for now, but it should be treated as such.
63 		 */
64 		struct iovec __ubuf_iovec;
65 		struct {
66 			union {
67 				/* use iter_iov() to get the current vec */
68 				const struct iovec *__iov;
69 				const struct kvec *kvec;
70 				const struct bio_vec *bvec;
71 				const struct folio_queue *folioq;
72 				struct xarray *xarray;
73 				void __user *ubuf;
74 			};
75 			size_t count;
76 		};
77 	};
78 	union {
79 		unsigned long nr_segs;
80 		u8 folioq_slot;
81 		loff_t xarray_start;
82 	};
83 };
84 
iter_iov(const struct iov_iter * iter)85 static inline const struct iovec *iter_iov(const struct iov_iter *iter)
86 {
87 	if (iter->iter_type == ITER_UBUF)
88 		return (const struct iovec *) &iter->__ubuf_iovec;
89 	return iter->__iov;
90 }
91 
92 #define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
93 #define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)
94 
iov_iter_type(const struct iov_iter * i)95 static inline enum iter_type iov_iter_type(const struct iov_iter *i)
96 {
97 	return i->iter_type;
98 }
99 
iov_iter_save_state(struct iov_iter * iter,struct iov_iter_state * state)100 static inline void iov_iter_save_state(struct iov_iter *iter,
101 				       struct iov_iter_state *state)
102 {
103 	state->iov_offset = iter->iov_offset;
104 	state->count = iter->count;
105 	state->nr_segs = iter->nr_segs;
106 }
107 
iter_is_ubuf(const struct iov_iter * i)108 static inline bool iter_is_ubuf(const struct iov_iter *i)
109 {
110 	return iov_iter_type(i) == ITER_UBUF;
111 }
112 
iter_is_iovec(const struct iov_iter * i)113 static inline bool iter_is_iovec(const struct iov_iter *i)
114 {
115 	return iov_iter_type(i) == ITER_IOVEC;
116 }
117 
iov_iter_is_kvec(const struct iov_iter * i)118 static inline bool iov_iter_is_kvec(const struct iov_iter *i)
119 {
120 	return iov_iter_type(i) == ITER_KVEC;
121 }
122 
iov_iter_is_bvec(const struct iov_iter * i)123 static inline bool iov_iter_is_bvec(const struct iov_iter *i)
124 {
125 	return iov_iter_type(i) == ITER_BVEC;
126 }
127 
iov_iter_is_discard(const struct iov_iter * i)128 static inline bool iov_iter_is_discard(const struct iov_iter *i)
129 {
130 	return iov_iter_type(i) == ITER_DISCARD;
131 }
132 
iov_iter_is_folioq(const struct iov_iter * i)133 static inline bool iov_iter_is_folioq(const struct iov_iter *i)
134 {
135 	return iov_iter_type(i) == ITER_FOLIOQ;
136 }
137 
iov_iter_is_xarray(const struct iov_iter * i)138 static inline bool iov_iter_is_xarray(const struct iov_iter *i)
139 {
140 	return iov_iter_type(i) == ITER_XARRAY;
141 }
142 
iov_iter_rw(const struct iov_iter * i)143 static inline unsigned char iov_iter_rw(const struct iov_iter *i)
144 {
145 	return i->data_source ? WRITE : READ;
146 }
147 
user_backed_iter(const struct iov_iter * i)148 static inline bool user_backed_iter(const struct iov_iter *i)
149 {
150 	return iter_is_ubuf(i) || iter_is_iovec(i);
151 }
152 
153 /*
154  * Total number of bytes covered by an iovec.
155  *
156  * NOTE that it is not safe to use this function until all the iovec's
157  * segment lengths have been validated.  Because the individual lengths can
158  * overflow a size_t when added together.
159  */
iov_length(const struct iovec * iov,unsigned long nr_segs)160 static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
161 {
162 	unsigned long seg;
163 	size_t ret = 0;
164 
165 	for (seg = 0; seg < nr_segs; seg++)
166 		ret += iov[seg].iov_len;
167 	return ret;
168 }
169 
170 size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
171 				  size_t bytes, struct iov_iter *i);
172 void iov_iter_advance(struct iov_iter *i, size_t bytes);
173 void iov_iter_revert(struct iov_iter *i, size_t bytes);
174 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
175 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
176 size_t iov_iter_single_seg_count(const struct iov_iter *i);
177 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
178 			 struct iov_iter *i);
179 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
180 			 struct iov_iter *i);
181 
182 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
183 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
184 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
185 
copy_folio_to_iter(struct folio * folio,size_t offset,size_t bytes,struct iov_iter * i)186 static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
187 		size_t bytes, struct iov_iter *i)
188 {
189 	return copy_page_to_iter(&folio->page, offset, bytes, i);
190 }
191 
copy_folio_from_iter(struct folio * folio,size_t offset,size_t bytes,struct iov_iter * i)192 static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset,
193 					  size_t bytes, struct iov_iter *i)
194 {
195 	return copy_page_from_iter(&folio->page, offset, bytes, i);
196 }
197 
copy_folio_from_iter_atomic(struct folio * folio,size_t offset,size_t bytes,struct iov_iter * i)198 static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
199 		size_t offset, size_t bytes, struct iov_iter *i)
200 {
201 	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
202 }
203 
204 size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
205 				 size_t bytes, struct iov_iter *i);
206 
207 static __always_inline __must_check
copy_to_iter(const void * addr,size_t bytes,struct iov_iter * i)208 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
209 {
210 	if (check_copy_size(addr, bytes, true))
211 		return _copy_to_iter(addr, bytes, i);
212 	return 0;
213 }
214 
215 static __always_inline __must_check
copy_from_iter(void * addr,size_t bytes,struct iov_iter * i)216 size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
217 {
218 	if (check_copy_size(addr, bytes, false))
219 		return _copy_from_iter(addr, bytes, i);
220 	return 0;
221 }
222 
223 static __always_inline __must_check
copy_to_iter_full(const void * addr,size_t bytes,struct iov_iter * i)224 bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i)
225 {
226 	size_t copied = copy_to_iter(addr, bytes, i);
227 	if (likely(copied == bytes))
228 		return true;
229 	iov_iter_revert(i, copied);
230 	return false;
231 }
232 
233 static __always_inline __must_check
copy_from_iter_full(void * addr,size_t bytes,struct iov_iter * i)234 bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
235 {
236 	size_t copied = copy_from_iter(addr, bytes, i);
237 	if (likely(copied == bytes))
238 		return true;
239 	iov_iter_revert(i, copied);
240 	return false;
241 }
242 
243 static __always_inline __must_check
copy_from_iter_nocache(void * addr,size_t bytes,struct iov_iter * i)244 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
245 {
246 	if (check_copy_size(addr, bytes, false))
247 		return _copy_from_iter_nocache(addr, bytes, i);
248 	return 0;
249 }
250 
251 static __always_inline __must_check
copy_from_iter_full_nocache(void * addr,size_t bytes,struct iov_iter * i)252 bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
253 {
254 	size_t copied = copy_from_iter_nocache(addr, bytes, i);
255 	if (likely(copied == bytes))
256 		return true;
257 	iov_iter_revert(i, copied);
258 	return false;
259 }
260 
261 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
262 /*
263  * Note, users like pmem that depend on the stricter semantics of
264  * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
265  * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
266  * destination is flushed from the cache on return.
267  */
268 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
269 #else
270 #define _copy_from_iter_flushcache _copy_from_iter_nocache
271 #endif
272 
273 #ifdef CONFIG_ARCH_HAS_COPY_MC
274 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
275 #else
276 #define _copy_mc_to_iter _copy_to_iter
277 #endif
278 
279 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
280 bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
281 			unsigned len_mask);
282 unsigned long iov_iter_alignment(const struct iov_iter *i);
283 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
284 void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
285 			unsigned long nr_segs, size_t count);
286 void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
287 			unsigned long nr_segs, size_t count);
288 void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
289 			unsigned long nr_segs, size_t count);
290 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
291 void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
292 			  const struct folio_queue *folioq,
293 			  unsigned int first_slot, unsigned int offset, size_t count);
294 void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
295 		     loff_t start, size_t count);
296 ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
297 			size_t maxsize, unsigned maxpages, size_t *start);
298 ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
299 			size_t maxsize, size_t *start);
300 int iov_iter_npages(const struct iov_iter *i, int maxpages);
301 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);
302 
303 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
304 
iov_iter_count(const struct iov_iter * i)305 static inline size_t iov_iter_count(const struct iov_iter *i)
306 {
307 	return i->count;
308 }
309 
310 /*
311  * Cap the iov_iter by given limit; note that the second argument is
312  * *not* the new size - it's upper limit for such.  Passing it a value
313  * greater than the amount of data in iov_iter is fine - it'll just do
314  * nothing in that case.
315  */
iov_iter_truncate(struct iov_iter * i,u64 count)316 static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
317 {
318 	/*
319 	 * count doesn't have to fit in size_t - comparison extends both
320 	 * operands to u64 here and any value that would be truncated by
321 	 * conversion in assignement is by definition greater than all
322 	 * values of size_t, including old i->count.
323 	 */
324 	if (i->count > count)
325 		i->count = count;
326 }
327 
328 /*
329  * reexpand a previously truncated iterator; count must be no more than how much
330  * we had shrunk it.
331  */
iov_iter_reexpand(struct iov_iter * i,size_t count)332 static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
333 {
334 	i->count = count;
335 }
336 
337 static inline int
iov_iter_npages_cap(struct iov_iter * i,int maxpages,size_t max_bytes)338 iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
339 {
340 	size_t shorted = 0;
341 	int npages;
342 
343 	if (iov_iter_count(i) > max_bytes) {
344 		shorted = iov_iter_count(i) - max_bytes;
345 		iov_iter_truncate(i, max_bytes);
346 	}
347 	npages = iov_iter_npages(i, maxpages);
348 	if (shorted)
349 		iov_iter_reexpand(i, iov_iter_count(i) + shorted);
350 
351 	return npages;
352 }
353 
354 struct iovec *iovec_from_user(const struct iovec __user *uvector,
355 		unsigned long nr_segs, unsigned long fast_segs,
356 		struct iovec *fast_iov, bool compat);
357 ssize_t import_iovec(int type, const struct iovec __user *uvec,
358 		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
359 		 struct iov_iter *i);
360 ssize_t __import_iovec(int type, const struct iovec __user *uvec,
361 		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
362 		 struct iov_iter *i, bool compat);
363 int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
364 
iov_iter_ubuf(struct iov_iter * i,unsigned int direction,void __user * buf,size_t count)365 static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
366 			void __user *buf, size_t count)
367 {
368 	WARN_ON(direction & ~(READ | WRITE));
369 	*i = (struct iov_iter) {
370 		.iter_type = ITER_UBUF,
371 		.data_source = direction,
372 		.ubuf = buf,
373 		.count = count,
374 		.nr_segs = 1
375 	};
376 }
377 /* Flags for iov_iter_get/extract_pages*() */
378 /* Allow P2PDMA on the extracted pages */
379 #define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)
380 
381 ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
382 			       size_t maxsize, unsigned int maxpages,
383 			       iov_iter_extraction_t extraction_flags,
384 			       size_t *offset0);
385 
386 /**
387  * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
388  * @iter: The iterator
389  *
390  * Examine the iterator and indicate by returning true or false as to how, if
391  * at all, pages extracted from the iterator will be retained by the extraction
392  * function.
393  *
394  * %true indicates that the pages will have a pin placed in them that the
395  * caller must unpin.  This is must be done for DMA/async DIO to force fork()
396  * to forcibly copy a page for the child (the parent must retain the original
397  * page).
398  *
399  * %false indicates that no measures are taken and that it's up to the caller
400  * to retain the pages.
401  */
iov_iter_extract_will_pin(const struct iov_iter * iter)402 static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
403 {
404 	return user_backed_iter(iter);
405 }
406 
407 struct sg_table;
408 ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
409 			   struct sg_table *sgtable, unsigned int sg_max,
410 			   iov_iter_extraction_t extraction_flags);
411 
412 #endif
413