// SPDX-License-Identifier: GPL-2.0-or-later
/* Iterator helpers.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <linux/netfs.h>
#include "internal.h"

/**
 * netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
 * @orig: The original iterator
 * @orig_len: The amount of iterator to copy
 * @new: The iterator to be set up
 * @extraction_flags: Flags to qualify the request
 *
 * Extract the page fragments from the given amount of the source iterator and
 * build up a second iterator that refers to all of those bits.  This allows
 * the original iterator to be disposed of.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request that
 * peer-to-peer DMA be allowed on the pages extracted.
 *
 * On success, the number of elements in the bvec is returned and the original
 * iterator will have been advanced by the amount extracted.
 *
 * The iov_iter_extract_will_pin() function should be used to query how
 * cleanup should be performed.
 */
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
				struct iov_iter *new,
				iov_iter_extraction_t extraction_flags)
{
	struct bio_vec *bv = NULL;
	struct page **pages;
	unsigned int cur_npages;
	unsigned int max_pages;
	unsigned int npages = 0;
	unsigned int i;
	ssize_t ret;
	size_t count = orig_len, offset, len;
	size_t bv_size, pg_size;

	if (WARN_ON_ONCE(!iter_is_ubuf(orig) && !iter_is_iovec(orig)))
		return -EIO;

	max_pages = iov_iter_npages(orig, INT_MAX);
	bv_size = array_size(max_pages, sizeof(*bv));
	bv = kvmalloc(bv_size, GFP_KERNEL);
	if (!bv)
		return -ENOMEM;

	/* Put the page list at the end of the bvec list storage.  bvec
	 * elements are larger than page pointers, so as long as we work
	 * 0->last, we should be fine.
	 */
	pg_size = array_size(max_pages, sizeof(*pages));
	pages = (void *)bv + bv_size - pg_size;
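	/* The two arrays overlap, but each page pointer is read (by *pages++
	 * below) before the bvec occupying the same bytes is written, so
	 * filling slots in ascending order never clobbers a pointer that is
	 * still needed.
	 */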

	while (count && npages < max_pages) {
		ret = iov_iter_extract_pages(orig, &pages, count,
					     max_pages - npages, extraction_flags,
					     &offset);
		if (ret < 0) {
			pr_err("Couldn't get user pages (rc=%zd)\n", ret);
			break;
		}

		if (ret > count) {
			pr_err("get_pages rc=%zd more than %zu\n", ret, count);
			break;
		}

		count -= ret;
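		/* Fold in the offset of the first page so that the division
		 * below counts every page touched by the extracted span.
		 */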
		ret += offset;
		cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			pr_err("Out of bvec array capacity (%u vs %u)\n",
			       npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
			bvec_set_page(bv + npages + i, *pages++, len - offset, offset);
			ret -= len;
			offset = 0;
		}

		npages += cur_npages;
	}

	iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
	return npages;
}
EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
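
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * that extracts up to 64KiB from a user-backed iterator, performs I/O on the
 * resulting bvec iterator, then releases the page pins and the bvec array.
 * The function name is made up for the example.
 */
static ssize_t __maybe_unused netfs_example_extract(struct iov_iter *user_iter)
{
	struct iov_iter bv_iter;
	size_t len = umin(iov_iter_count(user_iter), SZ_64K);
	bool need_unpin = iov_iter_extract_will_pin(user_iter);
	ssize_t nbv, i;

	nbv = netfs_extract_user_iter(user_iter, len, &bv_iter, 0);
	if (nbv < 0)
		return nbv;

	/* ... issue I/O described by bv_iter here ... */

	/* Pages extracted from a user iterator carry pins; drop them. */
	if (need_unpin)
		for (i = 0; i < nbv; i++)
			unpin_user_page(bv_iter.bvec[i].bv_page);

	/* Free the bvec array that netfs_extract_user_iter() allocated. */
	kvfree(bv_iter.bvec);
	return nbv;
}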

/*
 * Select the span of a bvec iterator we're going to use.  Limit it by both
 * maximum size and maximum number of segments.  Returns the size of the span
 * in bytes.
 */
static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
			       size_t max_size, size_t max_segs)
{
	const struct bio_vec *bvecs = iter->bvec;
	unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
	size_t len, span = 0, n = iter->count;
	size_t skip = iter->iov_offset + start_offset;

	if (WARN_ON(!iov_iter_is_bvec(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;

	while (n && ix < nbv && skip) {
		len = bvecs[ix].bv_len;
		if (skip < len)
			break;
		skip -= len;
		n -= len;
		ix++;
	}

	while (n && ix < nbv) {
		len = min3(n, bvecs[ix].bv_len - skip, max_size);
		span += len;
		nsegs++;
		ix++;
		if (span >= max_size || nsegs >= max_segs)
			break;
		skip = 0;
		n -= len;
	}

	return min(span, max_size);
}
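
/*
 * Worked example for netfs_limit_bvec() (illustrative, not in the original):
 * with three bvec segments of 2KiB, 5KiB and 1KiB, iov_offset 0,
 * start_offset 1KiB, max_size 6KiB and max_segs 2, the first loop stops in
 * segment 0 with 1KiB still to skip; the second loop then takes 1KiB from
 * segment 0 and 5KiB from segment 1, hitting max_size, so 6KiB is returned.
 */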

/*
 * Select the span of an xarray iterator we're going to use.  Limit it by both
 * maximum size and maximum number of segments.  It is assumed that segments
 * can be larger than a page in size, provided they're physically contiguous.
 * Returns the size of the span in bytes.
 */
static size_t netfs_limit_xarray(const struct iov_iter *iter, size_t start_offset,
				 size_t max_size, size_t max_segs)
{
	struct folio *folio;
	unsigned int nsegs = 0;
	loff_t pos = iter->xarray_start + iter->iov_offset;
	pgoff_t index = pos / PAGE_SIZE;
	size_t span = 0, n = iter->count;

	XA_STATE(xas, iter->xarray, index);

	if (WARN_ON(!iov_iter_is_xarray(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;
	max_size = min(max_size, n - start_offset);

	rcu_read_lock();
	xas_for_each(&xas, folio, ULONG_MAX) {
		size_t offset, flen, len;

		if (xas_retry(&xas, folio))
			continue;
		if (WARN_ON(xa_is_value(folio)))
			break;
		if (WARN_ON(folio_test_hugetlb(folio)))
			break;

		flen = folio_size(folio);
		offset = offset_in_folio(folio, pos);
		len = min(max_size, flen - offset);
		span += len;
		nsegs++;
		if (span >= max_size || nsegs >= max_segs)
			break;
	}

	rcu_read_unlock();
	return min(span, max_size);
}

/*
 * Select the span of a folio queue iterator we're going to use.  Limit it by
 * both maximum size and maximum number of segments.  Returns the size of the
 * span in bytes.
 */
static size_t netfs_limit_folioq(const struct iov_iter *iter, size_t start_offset,
				 size_t max_size, size_t max_segs)
{
	const struct folio_queue *folioq = iter->folioq;
	unsigned int nsegs = 0;
	unsigned int slot = iter->folioq_slot;
	size_t span = 0, n = iter->count;

	if (WARN_ON(!iov_iter_is_folioq(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;
	max_size = umin(max_size, n - start_offset);

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
	}

	start_offset += iter->iov_offset;
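	/* Walk the queue, consuming start_offset across any folios that lie
	 * wholly before the starting position, then accumulate the span.
	 */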
	do {
		size_t flen = folioq_folio_size(folioq, slot);

		if (start_offset < flen) {
			span += flen - start_offset;
			nsegs++;
			start_offset = 0;
		} else {
			start_offset -= flen;
		}
		if (span >= max_size || nsegs >= max_segs)
			break;

		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = folioq->next;
			slot = 0;
		}
	} while (folioq);

	return umin(span, max_size);
}

/*
 * Select the span of a buffer iterator that we're going to use.  Limit it by
 * both maximum size and maximum number of segments.  Only folioq, bvec and
 * xarray iterators are supported.  Returns the size of the span in bytes.
 */
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
			size_t max_size, size_t max_segs)
{
	if (iov_iter_is_folioq(iter))
		return netfs_limit_folioq(iter, start_offset, max_size, max_segs);
	if (iov_iter_is_bvec(iter))
		return netfs_limit_bvec(iter, start_offset, max_size, max_segs);
	if (iov_iter_is_xarray(iter))
		return netfs_limit_xarray(iter, start_offset, max_size, max_segs);
	BUG();
}
EXPORT_SYMBOL(netfs_limit_iter);
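
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * that splits a source iterator into transport-sized subrequests.  The
 * function name and the max_wire_size/max_segs limits are made up for the
 * example.
 */
static void __maybe_unused netfs_example_split(struct iov_iter *source,
					       size_t max_wire_size,
					       size_t max_segs)
{
	while (iov_iter_count(source)) {
		size_t part = netfs_limit_iter(source, 0, max_wire_size,
					       max_segs);

		if (!part)
			break;
		/* ... issue a subrequest covering the first 'part' bytes ... */
		iov_iter_advance(source, part);
	}
}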