// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/folio_queue.h>

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Returns the total number of entries in @sg, taking chaining into
 * account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);
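
/*
 * Usage sketch (illustrative only, not part of this file): sg_nents() is
 * handy when only the head of a possibly-chained list is at hand.  "sgt"
 * below stands in for a hypothetical caller's table:
 *
 *	struct sg_table sgt;
 *
 *	if (sg_alloc_table(&sgt, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *	WARN_ON(sg_nents(sgt.sgl) != sgt.orig_nents);
 *	sg_free_table(&sgt);
 */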

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 * Determines the number of entries in sg that are required to meet
 * the supplied length, taking into account chaining as well
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
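
/*
 * Usage sketch (illustrative only): a caller that must cover the first
 * N bytes of a list can size its descriptor count up front; "sgt" is a
 * hypothetical, already-populated table:
 *
 *	int nents = sg_nents_for_len(sgt.sgl, 4096);
 *
 *	if (nents < 0)
 *		return nents;	// the list holds fewer than 4096 bytes
 */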

/**
 * sg_nents_for_dma - return the count of DMA-capable entries in scatterlist
 * @sgl:	The scatterlist
 * @sglen:	The current number of entries
 * @len:	The maximum length of a DMA-capable block
 *
 * Description:
 * Determines the number of entries @sgl would need if it were split so
 * that no DMA-capable transfer exceeds @len, taking chaining into
 * account.
 *
 * Returns:
 *   the number of sgl entries needed
 *
 **/
int sg_nents_for_dma(struct scatterlist *sgl, unsigned int sglen, size_t len)
{
	struct scatterlist *sg;
	int i, nents = 0;

	for_each_sg(sgl, sg, sglen, i)
		nents += DIV_ROUND_UP(sg_dma_len(sg), len);
	return nents;
}
EXPORT_SYMBOL(sg_nents_for_dma);
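
/*
 * Usage sketch (illustrative only): a controller limited to 64 KiB per
 * descriptor can size its ring from an already DMA-mapped table ("sgt"
 * is hypothetical caller state):
 *
 *	int ndesc = sg_nents_for_dma(sgt.sgl, sgt.nents, SZ_64K);
 */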

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl pointer passed in need not be the first one,
 *   the important bit is that @nents denotes the number of entries that
 *   exist from @sgl.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
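
/*
 * Usage sketch (illustrative only): wrapping a kmalloc'ed buffer in a
 * one-entry list, as is common before handing data to the crypto or DMA
 * APIs.  The buffer must not live on the stack, since sg_set_buf()
 * requires an address with a valid struct page behind it:
 *
 *	struct scatterlist sg;
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	sg_init_one(&sg, buf, 64);
 */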

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_objs(struct scatterlist, nents, gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 * 	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:	Free function
 * @num_ents:	Number of entries in the table
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     unsigned int nents_first_chunk, sg_free_fn *free_fn,
		     unsigned int num_ents)
{
	struct scatterlist *sgl, *next;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (num_ents) {
		unsigned int alloc_size = num_ents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > curr_max_ents) {
			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		num_ents -= sg_size;
		if (nents_first_chunk)
			nents_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
		curr_max_ents = max_ents;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_append_table - Free a previously allocated append sg table.
 * @table:	 The mapped sg append table header
 *
 **/
void sg_free_append_table(struct sg_append_table *table)
{
	__sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
			table->total_nents);
}
EXPORT_SYMBOL(sg_free_append_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
			table->orig_nents);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL if preallocated (may be %NULL)
 * @nents_first_chunk: Number of entries in the (preallocated) first
 * 	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function sets up @table with @nents entries. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     unsigned int nents_first_chunk, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
	unsigned prv_max_ents;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > curr_max_ents) {
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, prv_max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
		prv_max_ents = curr_max_ents;
		curr_max_ents = max_ents;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, 0, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		sg_free_table(table);
	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
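
/*
 * Usage sketch (illustrative only): the usual allocate/fill/map/free
 * cycle around sg_alloc_table().  "dev", "pages" and "npages" stand in
 * for hypothetical caller state:
 *
 *	struct sg_table sgt;
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = sg_alloc_table(&sgt, npages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	ret = dma_map_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
 *	...
 *	dma_unmap_sgtable(dev, &sgt, DMA_TO_DEVICE, 0);
 *	sg_free_table(&sgt);
 */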

static struct scatterlist *get_next_sg(struct sg_append_table *table,
				       struct scatterlist *cur,
				       unsigned long needed_sges,
				       gfp_t gfp_mask)
{
	struct scatterlist *new_sg, *next_sg;
	unsigned int alloc_size;

	if (cur) {
		next_sg = sg_next(cur);
		/* Check if the last entry should be kept for chaining */
		if (!sg_is_last(next_sg) || needed_sges == 1)
			return next_sg;
	}

	alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
	new_sg = sg_kmalloc(alloc_size, gfp_mask);
	if (!new_sg)
		return ERR_PTR(-ENOMEM);
	sg_init_table(new_sg, alloc_size);
	if (cur) {
		table->total_nents += alloc_size - 1;
		__sg_chain(next_sg, new_sg);
	} else {
		table->sgt.sgl = new_sg;
		table->total_nents = alloc_size;
	}
	return new_sg;
}

static bool pages_are_mergeable(struct page *a, struct page *b)
{
	if (page_to_pfn(a) != page_to_pfn(b) + 1)
		return false;
	if (!zone_device_pages_have_same_pgmap(a, b))
		return false;
	return true;
}

/**
 * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
 *                                    table from an array of pages
 * @sgt_append:  The sg append table to use
 * @pages:       Pointer to an array of page pointers
 * @n_pages:     Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @left_pages:  Number of pages the caller will append after this call
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *    On the first call this allocates and initializes an sg table from a
 *    list of pages; on subsequent calls it reuses and extends the
 *    scatterlist in @sgt_append. Contiguous ranges of the pages are
 *    squashed into a single scatterlist entry up to the maximum size
 *    specified in @max_segment.  The caller may provide an offset at the
 *    start and a size of valid data in the buffer described by the page
 *    array. The returned sg table is released by sg_free_append_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   sg_free_append_table() to cleanup any leftover allocations.
 *
 *   On the first call, @sgt_append must be zero-initialized.
 */
int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
		struct page **pages, unsigned int n_pages, unsigned int offset,
		unsigned long size, unsigned int max_segment,
		unsigned int left_pages, gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
	unsigned int added_nents = 0;
	struct scatterlist *s = sgt_append->prv;
	struct page *last_pg;

	/*
	 * The algorithm below requires max_segment to be aligned to PAGE_SIZE
	 * otherwise it can overshoot.
	 */
	max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
	if (WARN_ON(max_segment < PAGE_SIZE))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
		return -EOPNOTSUPP;

	if (sgt_append->prv) {
		unsigned long next_pfn;

		if (WARN_ON(offset))
			return -EINVAL;

		/* Merge contiguous pages into the last SG */
		prv_len = sgt_append->prv->length;
		next_pfn = (sg_phys(sgt_append->prv) + prv_len) / PAGE_SIZE;
		if (page_to_pfn(pages[0]) == next_pfn) {
			last_pg = pfn_to_page(next_pfn - 1);
			while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
				if (sgt_append->prv->length + PAGE_SIZE > max_segment)
					break;
				sgt_append->prv->length += PAGE_SIZE;
				last_pg = pages[0];
				pages++;
				n_pages--;
			}
			if (!n_pages)
				goto out;
		}
	}

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    !pages_are_mergeable(pages[i], pages[i - 1])) {
			chunks++;
			seg_len = 0;
		}
	}

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for (i = 0; i < chunks; i++) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    !pages_are_mergeable(pages[j], pages[j - 1]))
				break;
		}

		/* Pass how many chunks might be left */
		s = get_next_sg(sgt_append, s, chunks - i + left_pages,
				gfp_mask);
		if (IS_ERR(s)) {
			/*
			 * Adjust entry length to be as before function was
			 * called.
			 */
			if (sgt_append->prv)
				sgt_append->prv->length = prv_len;
			return PTR_ERR(s);
		}
		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		added_nents++;
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}
	sgt_append->sgt.nents += added_nents;
	sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
	sgt_append->prv = s;
out:
	if (!left_pages)
		sg_mark_end(s);
	return 0;
}
EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
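
/*
 * Usage sketch (illustrative only): appending batches of pages as they
 * are pinned, passing the number of pages still to come so the chain is
 * kept open between calls ("batch", "n" and "remaining" are hypothetical
 * caller state):
 *
 *	struct sg_append_table append = {};
 *	int err;
 *
 *	err = sg_alloc_append_table_from_pages(&append, batch, n, 0,
 *					       (unsigned long)n << PAGE_SHIFT,
 *					       UINT_MAX, remaining,
 *					       GFP_KERNEL);
 *	if (err)
 *		sg_free_append_table(&append);
 */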

/**
 * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
 *                                     an array of pages and given maximum
 *                                     segment.
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @gfp_mask:	 GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. The caller may provide an offset
 *    at the start and a size of valid data in the buffer described by the
 *    page array.
 *
 *    The returned sg table is released by sg_free_table().
 *
 *  Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	struct sg_append_table append = {};
	int err;

	err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
					       size, max_segment, 0, gfp_mask);
	if (err) {
		sg_free_append_table(&append);
		return err;
	}
	memcpy(sgt, &append.sgt, sizeof(*sgt));
	WARN_ON(append.total_nents != sgt->orig_nents);
	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
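
/*
 * Usage sketch (illustrative only): building a table from pinned user
 * pages while keeping every element below a device's segment limit
 * ("pages" and "n_pages" might come from pin_user_pages()):
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages_segment(&sgt, pages, n_pages, 0,
 *			(unsigned long)n_pages << PAGE_SHIFT,
 *			SZ_64K, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */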

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_objs(struct scatterlist, nalloc, gfp & ~GFP_DMA);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free_order(sgl, order);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %llu\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
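
/*
 * Usage sketch (illustrative only): sgl_alloc() hands back both the list
 * and its backing pages, as SCSI target drivers do for data buffers:
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl = sgl_alloc(SZ_1M, GFP_KERNEL, &nents);
 *
 *	if (!sgl)
 *		return -ENOMEM;
 *	...
 *	sgl_free(sgl);
 */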

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
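
/*
 * Usage sketch (illustrative only): __sg_page_iter_start()/_next() are
 * normally driven through the for_each_sg_page() helper, which visits
 * the list one page at a time no matter how large each entry is ("sgt"
 * is a hypothetical table):
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgt.sgl, &piter, sgt.orig_nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *		...
 *	}
 */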

static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been advanced by sg_miter_next(), this
 *   stops @miter first.
 *
 * Context:
 *   Don't care.
 *
 * Returns:
 *   true if @miter contains a valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   May sleep if !SG_MITER_ATOMIC && !SG_MITER_LOCAL.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else if (miter->__flags & SG_MITER_LOCAL)
		miter->addr = kmap_local_page(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(!pagefault_disabled());
			kunmap_atomic(miter->addr);
		} else if (miter->__flags & SG_MITER_LOCAL)
			kunmap_local(miter->addr);
		else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
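
/*
 * Usage sketch (illustrative only): the canonical miter loop.  The flags
 * select the mapping flavour and copy direction, and sg_miter_stop()
 * must run before leaving the context the mappings were taken in
 * ("sgl", "nents" and "dst" are hypothetical caller state):
 *
 *	struct sg_mapping_iter miter;
 *	size_t done = 0;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_FROM_SG | SG_MITER_LOCAL);
 *	while (sg_miter_next(&miter)) {
 *		memcpy(dst + done, miter.addr, miter.length);
 *		done += miter.length;
 *	}
 *	sg_miter_stop(&miter);
 */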

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_LOCAL;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
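
/*
 * Usage sketch (illustrative only): the pcopy variants take a byte
 * offset into the sg list, sparing callers from splitting entries by
 * hand.  Here 16 bytes are read starting 512 bytes in ("sgl" and
 * "nents" are hypothetical):
 *
 *	u8 hdr[16];
 *	size_t n = sg_pcopy_to_buffer(sgl, nents, hdr, sizeof(hdr), 512);
 *
 *	if (n != sizeof(hdr))
 *		return -EIO;
 */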

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		       size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_LOCAL | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);

/*
 * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
 * iterators, and add them to the scatterlist.
 */
static ssize_t extract_user_to_sg(struct iov_iter *iter,
				  ssize_t maxsize,
				  struct sg_table *sgtable,
				  unsigned int sg_max,
				  iov_iter_extraction_t extraction_flags)
{
	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
	struct page **pages;
	unsigned int npages;
	ssize_t ret = 0, res;
	size_t len, off;

	/* We decant the page list into the tail of the scatterlist */
	pages = (void *)sgtable->sgl +
		array_size(sg_max, sizeof(struct scatterlist));
	pages -= sg_max;

	do {
		res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
					     extraction_flags, &off);
		if (res <= 0)
			goto failed;

		len = res;
		maxsize -= len;
		ret += len;
		npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
		sg_max -= npages;

		for (; npages > 0; npages--) {
			struct page *page = *pages;
			size_t seg = min_t(size_t, PAGE_SIZE - off, len);

			*pages++ = NULL;
			sg_set_page(sg, page, seg, off);
			sgtable->nents++;
			sg++;
			len -= seg;
			off = 0;
		}
	} while (maxsize > 0 && sg_max > 0);

	return ret;

failed:
	while (sgtable->nents > sgtable->orig_nents)
		unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
	return res;
}

/*
 * Extract up to sg_max pages from a BVEC-type iterator and add them to the
 * scatterlist.  The pages are not pinned.
 */
static ssize_t extract_bvec_to_sg(struct iov_iter *iter,
				  ssize_t maxsize,
				  struct sg_table *sgtable,
				  unsigned int sg_max,
				  iov_iter_extraction_t extraction_flags)
{
	const struct bio_vec *bv = iter->bvec;
	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
	unsigned long start = iter->iov_offset;
	unsigned int i;
	ssize_t ret = 0;

	for (i = 0; i < iter->nr_segs; i++) {
		size_t off, len;

		len = bv[i].bv_len;
		if (start >= len) {
			start -= len;
			continue;
		}

		len = min_t(size_t, maxsize, len - start);
		off = bv[i].bv_offset + start;

		sg_set_page(sg, bv[i].bv_page, len, off);
		sgtable->nents++;
		sg++;
		sg_max--;

		ret += len;
		maxsize -= len;
		if (maxsize <= 0 || sg_max == 0)
			break;
		start = 0;
	}

	if (ret > 0)
		iov_iter_advance(iter, ret);
	return ret;
}

/*
 * Extract up to sg_max pages from a KVEC-type iterator and add them to the
 * scatterlist.  This can deal with vmalloc'd buffers as well as kmalloc'd or
 * static buffers.  The pages are not pinned.
 */
static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
				  ssize_t maxsize,
				  struct sg_table *sgtable,
				  unsigned int sg_max,
				  iov_iter_extraction_t extraction_flags)
{
	const struct kvec *kv = iter->kvec;
	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
	unsigned long start = iter->iov_offset;
	unsigned int i;
	ssize_t ret = 0;

	for (i = 0; i < iter->nr_segs; i++) {
		struct page *page;
		unsigned long kaddr;
		size_t off, len, seg;

		len = kv[i].iov_len;
		if (start >= len) {
			start -= len;
			continue;
		}

		kaddr = (unsigned long)kv[i].iov_base + start;
		off = kaddr & ~PAGE_MASK;
		len = min_t(size_t, maxsize, len - start);
		kaddr &= PAGE_MASK;

		maxsize -= len;
		ret += len;
		do {
			seg = min_t(size_t, len, PAGE_SIZE - off);
			if (is_vmalloc_or_module_addr((void *)kaddr))
				page = vmalloc_to_page((void *)kaddr);
			else
				page = virt_to_page((void *)kaddr);

			/*
			 * Each entry covers at most one page, so use the
			 * page-bounded chunk length rather than the total
			 * remaining length.
			 */
			sg_set_page(sg, page, seg, off);
			sgtable->nents++;
			sg++;
			sg_max--;

			len -= seg;
			kaddr += PAGE_SIZE;
			off = 0;
		} while (len > 0 && sg_max > 0);

		if (maxsize <= 0 || sg_max == 0)
			break;
		start = 0;
	}

	if (ret > 0)
		iov_iter_advance(iter, ret);
	return ret;
}

/*
 * Extract up to sg_max folios from a FOLIOQ-type iterator and add them to
 * the scatterlist.  The pages are not pinned.
 */
static ssize_t extract_folioq_to_sg(struct iov_iter *iter,
				   ssize_t maxsize,
				   struct sg_table *sgtable,
				   unsigned int sg_max,
				   iov_iter_extraction_t extraction_flags)
{
	const struct folio_queue *folioq = iter->folioq;
	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
	unsigned int slot = iter->folioq_slot;
	ssize_t ret = 0;
	size_t offset = iter->iov_offset;

	BUG_ON(!folioq);

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		if (WARN_ON_ONCE(!folioq))
			return 0;
		slot = 0;
	}

	do {
		struct folio *folio = folioq_folio(folioq, slot);
		size_t fsize = folioq_folio_size(folioq, slot);

		if (offset < fsize) {
			size_t part = umin(maxsize - ret, fsize - offset);

			sg_set_page(sg, folio_page(folio, 0), part, offset);
			sgtable->nents++;
			sg++;
			sg_max--;
			offset += part;
			ret += part;
		}

		if (offset >= fsize) {
			offset = 0;
			slot++;
			if (slot >= folioq_nr_slots(folioq)) {
				if (!folioq->next) {
					WARN_ON_ONCE(ret < iter->count);
					break;
				}
				folioq = folioq->next;
				slot = 0;
			}
		}
	} while (sg_max > 0 && ret < maxsize);

	iter->folioq = folioq;
	iter->folioq_slot = slot;
	iter->iov_offset = offset;
	iter->count -= ret;
	return ret;
}

/*
 * Extract up to sg_max folios from an XARRAY-type iterator and add them to
 * the scatterlist.  The pages are not pinned.
 */
static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
				    ssize_t maxsize,
				    struct sg_table *sgtable,
				    unsigned int sg_max,
				    iov_iter_extraction_t extraction_flags)
{
	struct scatterlist *sg = sgtable->sgl + sgtable->nents;
	struct xarray *xa = iter->xarray;
	struct folio *folio;
	loff_t start = iter->xarray_start + iter->iov_offset;
	pgoff_t index = start / PAGE_SIZE;
	ssize_t ret = 0;
	size_t offset, len;
	XA_STATE(xas, xa, index);

	rcu_read_lock();

	xas_for_each(&xas, folio, ULONG_MAX) {
		if (xas_retry(&xas, folio))
			continue;
		if (WARN_ON(xa_is_value(folio)))
			break;
		if (WARN_ON(folio_test_hugetlb(folio)))
			break;

		offset = offset_in_folio(folio, start);
		len = min_t(size_t, maxsize, folio_size(folio) - offset);

		sg_set_page(sg, folio_page(folio, 0), len, offset);
		sgtable->nents++;
		sg++;
		sg_max--;

		maxsize -= len;
		ret += len;
		if (maxsize <= 0 || sg_max == 0)
			break;
	}

	rcu_read_unlock();
	if (ret > 0)
		iov_iter_advance(iter, ret);
	return ret;
}

/**
 * extract_iter_to_sg - Extract pages from an iterator and add to an sglist
 * @iter: The iterator to extract from
 * @maxsize: The amount of iterator to copy
 * @sgtable: The scatterlist table to fill in
 * @sg_max: Maximum number of elements in @sgtable that may be filled
 * @extraction_flags: Flags to qualify the request
 *
 * Extract the page fragments from the given amount of the source iterator and
 * add them to a scatterlist that refers to all of those bits, to a maximum
 * addition of @sg_max elements.
 *
 * The pages referred to by UBUF- and IOVEC-type iterators are extracted and
 * pinned; BVEC-, KVEC-, FOLIOQ- and XARRAY-type are extracted but aren't
 * pinned; DISCARD-type is not supported.
 *
 * No end mark is placed on the scatterlist; that's left to the caller.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * If successful, @sgtable->nents is updated to include the number of elements
 * added and the number of bytes added is returned.  @sgtable->orig_nents is
 * left unaltered.
 *
 * The iov_iter_extract_mode() function should be used to query how cleanup
 * should be performed.
 */
ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
			   struct sg_table *sgtable, unsigned int sg_max,
			   iov_iter_extraction_t extraction_flags)
{
	if (maxsize == 0)
		return 0;

	switch (iov_iter_type(iter)) {
	case ITER_UBUF:
	case ITER_IOVEC:
		return extract_user_to_sg(iter, maxsize, sgtable, sg_max,
					  extraction_flags);
	case ITER_BVEC:
		return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max,
					  extraction_flags);
	case ITER_KVEC:
		return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
					  extraction_flags);
	case ITER_FOLIOQ:
		return extract_folioq_to_sg(iter, maxsize, sgtable, sg_max,
					    extraction_flags);
	case ITER_XARRAY:
		return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
					    extraction_flags);
	default:
		pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter));
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(extract_iter_to_sg);
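
/*
 * Usage sketch (illustrative only): filling a caller-provided table from
 * an iterator and then undoing the pin in the user-backed case ("iter"
 * and "len" are hypothetical caller state):
 *
 *	struct scatterlist sgl[16];
 *	struct sg_table sgt = { .sgl = sgl };
 *	ssize_t copied;
 *
 *	sg_init_table(sgl, ARRAY_SIZE(sgl));
 *	copied = extract_iter_to_sg(iter, len, &sgt, ARRAY_SIZE(sgl), 0);
 *	if (copied < 0)
 *		return copied;
 *	...
 *	if (iov_iter_extract_will_pin(iter))
 *		while (sgt.nents)
 *			unpin_user_page(sg_page(&sgt.sgl[--sgt.nents]));
 */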
1439