xref: /linux/include/linux/uio.h (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Berkeley style UIO structures	-	Alan Cox 1994.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm_types.h>
#include <uapi/linux/uio.h>

struct page;

typedef unsigned int __bitwise iov_iter_extraction_t;

struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;
};

enum iter_type {
	/* iter types */
	ITER_UBUF,
	ITER_IOVEC,
	ITER_BVEC,
	ITER_KVEC,
	ITER_XARRAY,
	ITER_DISCARD,
};

#define ITER_SOURCE	1	// == WRITE
#define ITER_DEST	0	// == READ

struct iov_iter_state {
	size_t iov_offset;
	size_t count;
	unsigned long nr_segs;
};

struct iov_iter {
	u8 iter_type;
	bool nofault;
	bool data_source;
	size_t iov_offset;
	/*
	 * Hack alert: overlay ubuf_iovec with iovec + count, so
	 * that the members resolve correctly regardless of the type
	 * of iterator used. This means that you can use:
	 *
	 * &iter->__ubuf_iovec or iter->__iov
	 *
	 * interchangeably for the user_backed cases, hence simplifying
	 * some of the cases that need to deal with both.
	 */
	union {
		/*
		 * This really should be a const, but we cannot do that without
		 * also modifying any of the zero-filling iter init functions.
		 * Leave it non-const for now, but it should be treated as such.
		 */
		struct iovec __ubuf_iovec;
		struct {
			union {
				/* use iter_iov() to get the current vec */
				const struct iovec *__iov;
				const struct kvec *kvec;
				const struct bio_vec *bvec;
				struct xarray *xarray;
				void __user *ubuf;
			};
			size_t count;
		};
	};
	union {
		unsigned long nr_segs;
		loff_t xarray_start;
	};
};

static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
	if (iter->iter_type == ITER_UBUF)
		return (const struct iovec *) &iter->__ubuf_iovec;
	return iter->__iov;
}

#define iter_iov_addr(iter)	(iter_iov(iter)->iov_base + (iter)->iov_offset)
#define iter_iov_len(iter)	(iter_iov(iter)->iov_len - (iter)->iov_offset)

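/*
 * Illustrative sketch only (not part of the upstream header): because of the
 * __ubuf_iovec overlay in struct iov_iter, iter_iov(), iter_iov_addr() and
 * iter_iov_len() resolve the current segment the same way whether the
 * iterator is ITER_UBUF or ITER_IOVEC.  The helper name below is hypothetical.
 */
static inline size_t uio_example_seg_remaining(const struct iov_iter *i)
{
	/* Only user-backed iterators carry an iovec/ubuf segment. */
	if (i->iter_type != ITER_UBUF && i->iter_type != ITER_IOVEC)
		return 0;
	/* Bytes left in the segment the iterator currently points at. */
	return iter_iov_len(i);
}
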
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
	return i->iter_type;
}

static inline void iov_iter_save_state(struct iov_iter *iter,
				       struct iov_iter_state *state)
{
	state->iov_offset = iter->iov_offset;
	state->count = iter->count;
	state->nr_segs = iter->nr_segs;
}

static inline bool iter_is_ubuf(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_UBUF;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_IOVEC;
}

static inline bool iov_iter_is_kvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_KVEC;
}

static inline bool iov_iter_is_bvec(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_BVEC;
}

static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_DISCARD;
}

static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
	return iov_iter_type(i) == ITER_XARRAY;
}

static inline unsigned char iov_iter_rw(const struct iov_iter *i)
{
	return i->data_source ? WRITE : READ;
}

static inline bool user_backed_iter(const struct iov_iter *i)
{
	return iter_is_ubuf(i) || iter_is_iovec(i);
}

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg;
	size_t ret = 0;

	for (seg = 0; seg < nr_segs; seg++)
		ret += iov[seg].iov_len;
	return ret;
}

size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);

static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
		size_t bytes, struct iov_iter *i)
{
	return copy_page_to_iter(&folio->page, offset, bytes, i);
}

static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
}

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
				 size_t bytes, struct iov_iter *i);

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, true))
		return _copy_to_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_to_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

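/*
 * Illustrative sketch only: a common pattern for pulling a fixed-size header
 * out of an iterator with copy_from_iter_full(), which either copies the
 * whole object or reverts the iterator so nothing is consumed.  The struct
 * and function names are hypothetical, and -EFAULT assumes <linux/errno.h>
 * is visible in the including translation unit.
 */
struct uio_example_hdr {
	u32 type;
	u32 len;
};

static inline int uio_example_read_hdr(struct uio_example_hdr *hdr,
				       struct iov_iter *from)
{
	/* Short input: the iterator is left untouched and we can bail out. */
	if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
		return -EFAULT;
	return 0;
}
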
static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (check_copy_size(addr, bytes, false))
		return _copy_from_iter_nocache(addr, bytes, i);
	return 0;
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t copied = copy_from_iter_nocache(addr, bytes, i);
	if (likely(copied == bytes))
		return true;
	iov_iter_revert(i, copied);
	return false;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note: users like pmem that depend on the stricter semantics of
 * _copy_from_iter_flushcache() compared to _copy_from_iter_nocache() must
 * check for IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming
 * that the destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_mc_to_iter _copy_to_iter
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
			unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
			unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
			unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
		     loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
			size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
	return i->count;
}

/*
 * Cap the iov_iter by the given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
	/*
	 * count doesn't have to fit in size_t - comparison extends both
	 * operands to u64 here and any value that would be truncated by
	 * conversion in assignment is by definition greater than all
	 * values of size_t, including old i->count.
	 */
	if (i->count > count)
		i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	i->count = count;
}

static inline int
iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
{
	size_t shorted = 0;
	int npages;

	if (iov_iter_count(i) > max_bytes) {
		shorted = iov_iter_count(i) - max_bytes;
		iov_iter_truncate(i, max_bytes);
	}
	npages = iov_iter_npages(i, maxpages);
	if (shorted)
		iov_iter_reexpand(i, iov_iter_count(i) + shorted);

	return npages;
}

struct iovec *iovec_from_user(const struct iovec __user *uvector,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat);
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i);
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);

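/*
 * Illustrative sketch only: the usual import_iovec() calling pattern for a
 * readv()-style path.  A small on-stack array serves as the fast path; on
 * return *iovp is either NULL (the fast array was used) or a kmalloc'ed
 * copy, so an unconditional kfree() is safe.  kfree() assumes <linux/slab.h>
 * is available; the function name is hypothetical.
 */
static inline ssize_t uio_example_count(const struct iovec __user *uvec,
					unsigned int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	/* ITER_DEST: data would flow from the kernel into these user buffers. */
	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* On success ret is the total byte count of the validated iovecs. */
	kfree(iov);
	return ret;
}
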
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
			void __user *buf, size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.data_source = direction,
		.ubuf = buf,
		.count = count,
		.nr_segs = 1
	};
}
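
/*
 * Illustrative sketch only: a single contiguous user buffer can be wrapped
 * directly in an ITER_UBUF iterator, the cheapest iterator form, here paired
 * with iov_iter_zero().  The helper name is hypothetical; for pointers coming
 * straight from userspace, import_ubuf() above is usually preferred since it
 * also performs the access_ok() check.
 */
static inline size_t uio_example_zero_user(void __user *buf, size_t len)
{
	struct iov_iter iter;

	/* ITER_DEST: the user buffer is the destination of the operation. */
	iov_iter_ubuf(&iter, ITER_DEST, buf, len);
	/* Returns how many bytes were actually zeroed (may be short on fault). */
	return iov_iter_zero(len, &iter);
}
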
/* Flags for iov_iter_get/extract_pages*() */
/* Allow P2PDMA on the extracted pages */
#define ITER_ALLOW_P2PDMA	((__force iov_iter_extraction_t)0x01)

ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages,
			       size_t maxsize, unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0);

/**
 * iov_iter_extract_will_pin - Indicate how pages from the iterator will be retained
 * @iter: The iterator
 *
 * Examine the iterator and indicate by returning true or false as to how, if
 * at all, pages extracted from the iterator will be retained by the extraction
 * function.
 *
 * %true indicates that the pages will have a pin placed in them that the
 * caller must unpin.  This must be done for DMA/async DIO to force fork()
 * to forcibly copy a page for the child (the parent must retain the original
 * page).
 *
 * %false indicates that no measures are taken and that it's up to the caller
 * to retain the pages.
 */
static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
{
	return user_backed_iter(iter);
}

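/*
 * Illustrative sketch only: a caller of iov_iter_extract_pages() typically
 * samples iov_iter_extract_will_pin() before extraction and, if it returned
 * true, drops the page pins once the I/O completes.  unpin_user_page() comes
 * from <linux/mm.h>, which this header does not include; the helper name is
 * hypothetical.
 */
static inline void uio_example_release_pages(struct page **pages,
					     unsigned int npages,
					     bool will_pin)
{
	unsigned int n;

	if (!will_pin)
		return;		/* kernel-backed iterator: nothing was pinned */
	for (n = 0; n < npages; n++)
		unpin_user_page(pages[n]);
}
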
struct sg_table;
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags);

#endif