Lines matching +full:min +full:- +full:len in the Linux kernel's lib/iov_iter.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/fault-inject-usercopy.h>

copy_to_user_iter():
	size_t len, void *from, void *priv2)
	return len;
	if (access_ok(iter_to, len)) {
	instrument_copy_to_user(iter_to, from, len);
	len = raw_copy_to_user(iter_to, from, len);
	return len;

copy_to_user_iter_nofault():
	size_t len, void *from, void *priv2)
	return len;
	res = copy_to_user_nofault(iter_to, from, len);
	return res < 0 ? len : res;

copy_from_user_iter():
	size_t len, void *to, void *priv2)
	size_t res = len;
	return len;
	if (access_ok(iter_from, len)) {
	instrument_copy_from_user_before(to, iter_from, len);
	res = raw_copy_from_user(to, iter_from, len);
	instrument_copy_from_user_after(to, iter_from, len, res);

memcpy_to_iter():
	size_t len, void *from, void *priv2)
	memcpy(iter_to, from + progress, len);

memcpy_from_iter():
	size_t len, void *to, void *priv2)
	memcpy(to + progress, iter_from, len);
fault_in_iov_iter_readable():
	* fault_in_iov_iter_readable - fault in iov iterator for reading
	* Always returns 0 for non-userspace iterators.
	size_t n = min(size, iov_iter_count(i));
	n -= fault_in_readable(i->ubuf + i->iov_offset, n);
	return size - n;
	size_t count = min(size, iov_iter_count(i));
	size -= count;
	for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
	size_t len = min(count, p->iov_len - skip);
	if (unlikely(!len))
	ret = fault_in_readable(p->iov_base + skip, len);
	count -= len - ret;
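Buffered-write paths build a fault-in/copy loop on top of this helper: fault the source pages in, copy, and retry only if no progress was made. A minimal sketch, not taken from this file; copy_user_data() and its parameters are hypothetical, while the iov_iter calls are real kernel APIs:

	/* Sketch: copy user data described by @i into @buf, faulting in on demand. */
	size_t copy_user_data(struct iov_iter *i, void *buf, size_t bytes)
	{
		size_t copied = 0;

		do {
			/* Returns the number of bytes NOT faulted in. */
			if (fault_in_iov_iter_readable(i, bytes) == bytes)
				break;		/* nothing resident, give up */
			copied = copy_from_iter(buf, bytes, i);
		} while (!copied && iov_iter_count(i));

		return copied;
	}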
fault_in_iov_iter_writeable():
	* fault_in_iov_iter_writeable - fault in iov iterator for writing
	* Always returns 0 for non-user-space iterators.
	size_t n = min(size, iov_iter_count(i));
	n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
	return size - n;
	size_t count = min(size, iov_iter_count(i));
	size -= count;
	for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
	size_t len = min(count, p->iov_len - skip);
	if (unlikely(!len))
	ret = fault_in_safe_writeable(p->iov_base + skip, len);
	count -= len - ret;
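The writeable variant is typically called on a read path before taking locks, so the eventual copy-out cannot deadlock on a page fault. A sketch, with @to and @bytes assumed from the surrounding context:

	/* Sketch: bail out if no destination byte can be made resident. */
	if (fault_in_iov_iter_writeable(to, bytes) == bytes)
		return -EFAULT;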
_copy_to_iter():
	if (WARN_ON_ONCE(i->data_source))

copy_to_user_iter_mc():
	size_t len, void *from, void *priv2)
	if (access_ok(iter_to, len)) {
	instrument_copy_to_user(iter_to, from, len);
	len = copy_mc_to_user(iter_to, from, len);
	return len;

memcpy_to_iter_mc():
	size_t len, void *from, void *priv2)
	return copy_mc_to_kernel(iter_to, from + progress, len);

_copy_mc_to_iter():
	* _copy_mc_to_iter - copy to iter with source memory error exception handling
	* (dax_copy_to_iter()) for dax reads (bypass page-cache and the
	* block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
	* byte-by-byte until the fault happens again. Re-triggering machine
	* alignment and poison alignment assumptions to avoid re-triggering
	if (WARN_ON_ONCE(i->data_source))
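The doc fragment above states the contract: the copy stops at the first poisoned source cacheline and reports the bytes actually copied. A hedged sketch of a dax-style read consuming that contract (kaddr, nbytes and iter are assumed context; only _copy_mc_to_iter is taken from this file):

	/* Sketch: a short copy means the source took a machine-check. */
	size_t done = _copy_mc_to_iter(kaddr, nbytes, iter);
	if (done != nbytes)
		return done ? done : -EIO;	/* partial progress, or hard EIO */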
_copy_from_iter():
	if (WARN_ON_ONCE(!i->data_source))

copy_from_user_iter_nocache():
	size_t len, void *to, void *priv2)
	return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);

_copy_from_iter_nocache():
	if (WARN_ON_ONCE(!i->data_source))

copy_from_user_iter_flushcache():
	size_t len, void *to, void *priv2)
	return __copy_from_user_flushcache(to + progress, iter_from, len);

memcpy_from_iter_flushcache():
	size_t len, void *to, void *priv2)
	memcpy_flushcache(to + progress, iter_from, len);

_copy_from_iter_flushcache():
	* _copy_from_iter_flushcache - write destination through cpu cache
	* The pmem driver arranges for filesystem-dax to use this facility via
	* instructions that strand dirty-data in the cache.
	if (WARN_ON_ONCE(!i->data_source))
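As the doc fragment says, this variant writes the destination through the CPU cache so persistent memory never holds stranded dirty lines. A one-line sketch of the write side (pmem_addr, nbytes and iter are assumed context):

	/* Sketch: data lands via flushed stores; the caller pairs this with a fence. */
	size_t n = _copy_from_iter_flushcache(pmem_addr, nbytes, iter);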
page_copy_sane():
	* However, we mostly deal with order-0 pages and thus can
	v += (page - head) << PAGE_SHIFT;

copy_page_to_iter():
	if (WARN_ON_ONCE(i->data_source))
	size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
	bytes -= n;

copy_page_to_iter_nofault():
	if (WARN_ON_ONCE(i->data_source))
	size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
	bytes -= n;

copy_page_from_iter():
	size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
	bytes -= n;

zero_to_user_iter():
	size_t len, void *priv, void *priv2)
	return clear_user(iter_to, len);

zero_to_iter():
	size_t len, void *priv, void *priv2)
	memset(iter_to, 0, len);

copy_folio_from_iter_atomic():
	if (!page_copy_sane(&folio->page, offset, bytes))
	if (WARN_ON_ONCE(!i->data_source))
	n = bytes - copied;
	n > PAGE_SIZE - offset_in_page(offset))
	n = PAGE_SIZE - offset_in_page(offset);
iov_iter_bvec_advance():
	if (!i->count)
	i->count -= size;
	size += i->iov_offset;
	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
	if (likely(size < bvec->bv_len))
	size -= bvec->bv_len;
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;

iov_iter_iovec_advance():
	if (!i->count)
	i->count -= size;
	size += i->iov_offset; // from beginning of current segment
	for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
	if (likely(size < iov->iov_len))
	size -= iov->iov_len;
	i->iov_offset = size;
	i->nr_segs -= iov - iter_iov(i);
	i->__iov = iov;

iov_iter_folioq_advance():
	const struct folio_queue *folioq = i->folioq;
	unsigned int slot = i->folioq_slot;
	if (!i->count)
	i->count -= size;
	folioq = folioq->next;
	size += i->iov_offset; /* From beginning of current segment. */
	size -= fsize;
	if (slot >= folioq_nr_slots(folioq) && folioq->next) {
	folioq = folioq->next;
	i->iov_offset = size;
	i->folioq_slot = slot;
	i->folioq = folioq;

iov_iter_advance():
	if (unlikely(i->count < size))
	size = i->count;
	i->iov_offset += size;
	i->count -= size;
	i->count -= size;
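All the advance helpers above do the same job for their backing store: subtract from count, skip whole segments, and leave iov_offset pointing into the new first segment. A minimal sketch of driving them through iov_iter_advance() on a bvec-backed iterator (page and done are assumed context):

	struct bio_vec bv;
	struct iov_iter iter;

	bvec_set_page(&bv, page, PAGE_SIZE, 0);
	iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, PAGE_SIZE);
	iov_iter_advance(&iter, done);	/* iov_iter_count() drops by done */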
iov_iter_folioq_revert():
	const struct folio_queue *folioq = i->folioq;
	unsigned int slot = i->folioq_slot;
	folioq = folioq->prev;
	slot--;
	i->iov_offset = fsize - unroll;
	unroll -= fsize;
	i->folioq_slot = slot;
	i->folioq = folioq;

iov_iter_revert():
	i->count += unroll;
	if (unroll <= i->iov_offset) {
	i->iov_offset -= unroll;
	unroll -= i->iov_offset;
	const struct bio_vec *bvec = i->bvec;
	size_t n = (--bvec)->bv_len;
	i->nr_segs++;
	i->bvec = bvec;
	i->iov_offset = n - unroll;
	unroll -= n;
	i->iov_offset = 0;
	size_t n = (--iov)->iov_len;
	i->nr_segs++;
	i->__iov = iov;
	i->iov_offset = n - unroll;
	unroll -= n;

iov_iter_single_seg_count():
	if (i->nr_segs > 1) {
	return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
	return min(i->count, i->bvec->bv_len - i->iov_offset);
	return !i->count ? 0 :
	umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
	return i->count;
iov_iter_folio_queue():
	* iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue

iov_iter_xarray():
	* iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray

iov_iter_discard():
	* iov_iter_discard - Initialise an I/O iterator that discards data
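These initialisers each produce a ready-to-use iterator in one call. A sketch of the xarray and discard variants (mapping, pos and count are assumed context; the function names are the real ones documented above):

	struct iov_iter iter;

	/* Iterate over the pages cached in an address_space's xarray. */
	iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);

	/* Or: a pure sink; data copied into it is simply dropped. */
	iov_iter_discard(&iter, ITER_DEST, count);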
iov_iter_aligned_iovec():
	size_t size = i->count;
	size_t skip = i->iov_offset;
	size_t len = iov->iov_len - skip;
	if (len > size)
	len = size;
	if (len & len_mask)
	if ((unsigned long)(iov->iov_base + skip) & addr_mask)
	size -= len;

iov_iter_aligned_bvec():
	const struct bio_vec *bvec = i->bvec;
	unsigned skip = i->iov_offset;
	size_t size = i->count;
	size_t len = bvec->bv_len - skip;
	if (len > size)
	len = size;
	if (len & len_mask)
	if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
	size -= len;

iov_iter_is_aligned():
	* iov_iter_is_aligned() - Check if the addresses and lengths of each segment
	if (i->count & len_mask)
	if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
	if (i->count & len_mask)
	if ((i->xarray_start + i->iov_offset) & addr_mask)
	if (i->count & len_mask)
	if (i->iov_offset & addr_mask)
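A common caller of this check is the block layer's direct-I/O alignment test; both masks are "required alignment minus one". A sketch along the lines of the block layer's usage (bdev and iter are assumed from context):

	/* Sketch: reject DIO whose addresses or lengths break device alignment. */
	if (!iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				 bdev_logical_block_size(bdev) - 1))
		return -EINVAL;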
iov_iter_alignment_iovec():
	size_t size = i->count;
	size_t skip = i->iov_offset;
	size_t len = iov->iov_len - skip;
	if (len) {
	res |= (unsigned long)iov->iov_base + skip;
	if (len > size)
	len = size;
	res |= len;
	size -= len;

iov_iter_alignment_bvec():
	const struct bio_vec *bvec = i->bvec;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	size_t len = bvec->bv_len - skip;
	res |= (unsigned long)bvec->bv_offset + skip;
	if (len > size)
	len = size;
	res |= len;
	size -= len;

iov_iter_alignment():
	size_t size = i->count;
	return ((unsigned long)i->ubuf + i->iov_offset) | size;
	return i->iov_offset | i->count;
	return (i->xarray_start + i->iov_offset) | i->count;

iov_iter_gap_alignment():
	size_t size = i->count;
	for (k = 0; k < i->nr_segs; k++) {
	if (iov->iov_len) {
	unsigned long base = (unsigned long)iov->iov_base;
	v = base + iov->iov_len;
	if (size <= iov->iov_len)
	size -= iov->iov_len;
iter_folioq_get_pages():
	const struct folio_queue *folioq = iter->folioq;
	unsigned int slot = iter->folioq_slot;
	size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset;
	folioq = folioq->next;
	return -EIO;
	return -ENOMEM;
	size_t part = PAGE_SIZE - offset % PAGE_SIZE;
	part = umin(part, umin(maxsize - extracted, fsize - offset));
	count -= part;
	maxpages--;
	if (slot == folioq_nr_slots(folioq) && folioq->next) {
	folioq = folioq->next;
	iter->count = count;
	iter->iov_offset = iov_offset;
	iter->folioq = folioq;
	iter->folioq_slot = slot;

iter_xarray_get_pages():
	pos = i->xarray_start + i->iov_offset;
	return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;

first_iovec_segment():
	/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
	return (unsigned long)i->ubuf + i->iov_offset;
	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
	size_t len = iov->iov_len - skip;
	if (unlikely(!len))
	if (*size > len)
	*size = len;
	return (unsigned long)iov->iov_base + skip;

first_bvec_segment():
	/* must be done on non-empty ITER_BVEC one */
	size_t skip = i->iov_offset, len;
	len = i->bvec->bv_len - skip;
	if (*size > len)
	*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;

__iov_iter_get_pages_alloc():
	if (maxsize > i->count)
	maxsize = i->count;
	if (i->nofault)
	return -ENOMEM;
	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	return -ENOMEM;
	maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	i->count -= maxsize;
	i->iov_offset += maxsize;
	if (i->iov_offset == i->bvec->bv_len) {
	i->iov_offset = 0;
	i->bvec++;
	i->nr_segs--;
	return -EFAULT;
iov_iter_get_pages_alloc2():
	ssize_t len;
	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
	return len;

iov_npages():
	size_t skip = i->iov_offset, size = i->count;
	unsigned offs = offset_in_page(p->iov_base + skip);
	size_t len = min(p->iov_len - skip, size);
	if (len) {
	size -= len;
	npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);

bvec_npages():
	size_t skip = i->iov_offset, size = i->count;
	for (p = i->bvec; size; skip = 0, p++) {
	unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
	size_t len = min(p->bv_len - skip, size);
	size -= len;
	npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);

iov_iter_npages():
	if (unlikely(!i->count))
	unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
	int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
	return min(npages, maxpages);
	unsigned offset = i->iov_offset % PAGE_SIZE;
	int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
	return min(npages, maxpages);
	unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
	int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
	return min(npages, maxpages);
dup_iter():
	return new->bvec = kmemdup(new->bvec,
	new->nr_segs * sizeof(struct bio_vec),
	return new->__iov = kmemdup(new->__iov,
	new->nr_segs * sizeof(struct iovec),

copy_compat_iovec_from_user():
	int ret = -EFAULT;
	return -EFAULT;
	compat_ssize_t len;
	unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
	if (len < 0) {
	ret = -EINVAL;
	iov[i].iov_len = len;

copy_iovec_from_user():
	int ret = -EFAULT;
	return -EFAULT;
	ssize_t len;
	unsafe_get_user(len, &uiov->iov_len, uaccess_end);
	unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
	if (unlikely(len < 0)) {
	ret = -EINVAL;
	iov->iov_base = buf;
	iov->iov_len = len;
	} while (--nr_segs);

iovec_from_user():
	return ERR_PTR(-EINVAL);
	return ERR_PTR(-ENOMEM);

__import_iovec_ubuf():
	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
	return i->count;

__import_iovec():
	ssize_t len = (ssize_t)iov[seg].iov_len;
	if (!access_ok(iov[seg].iov_base, len)) {
	return -EFAULT;
	if (len > MAX_RW_COUNT - total_len) {
	len = MAX_RW_COUNT - total_len;
	iov[seg].iov_len = len;
	total_len += len;
import_iovec():
	* import_iovec() - Copy an array of &struct iovec from userspace
	* on-stack) kernel array.
	* on-stack array was used or not (and regardless of whether this function
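The doc fragments above hint at the calling convention: *iovp is set to NULL when the on-stack array suffices, so kfree() is always safe afterwards. A sketch of the classic readv-style sequence (file, pos and uvector are assumed context; the other names are real kernel APIs):

	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(ITER_DEST, uvector, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;
	ret = vfs_iter_read(file, &iter, &pos, 0);
	kfree(iov);	/* safe whether or not the on-stack array was used */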
import_ubuf():
	int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
	if (len > MAX_RW_COUNT)
	len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
	return -EFAULT;
	iov_iter_ubuf(i, rw, buf, len);
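import_ubuf() is the single-buffer analogue used by plain read(2)/write(2)-style paths; as the lines above show, it clamps to MAX_RW_COUNT rather than failing on oversized requests. A sketch (user_buf and count are assumed context):

	struct iov_iter iter;
	int ret = import_ubuf(ITER_DEST, user_buf, count, &iter);

	if (unlikely(ret))
		return ret;	/* -EFAULT: buffer fails access_ok() */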
iov_iter_restore():
	* iov_iter_restore() - Restore a &struct iov_iter to the same state as when
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	* For the *vec iters, nr_segs + iov is constant - if we increment
	i->bvec -= state->nr_segs - i->nr_segs;
	i->__iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
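The restore logic above only rewinds count, iov_offset and the segment pointer, so it must be paired with a snapshot taken before the iterator is advanced. A sketch of that pairing (do_some_io() is hypothetical; the save/restore calls are real):

	struct iov_iter_state state;

	iov_iter_save_state(iter, &state);
	ret = do_some_io(iter);
	if (ret == -EAGAIN)
		iov_iter_restore(iter, &state);	/* rewind for the retry */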
iov_iter_extract_folioq_pages():
	const struct folio_queue *folioq = i->folioq;
	size_t extracted = 0, offset, slot = i->folioq_slot;
	folioq = folioq->next;
	if (WARN_ON(i->iov_offset != 0))
	return -EIO;
	offset = i->iov_offset & ~PAGE_MASK;
	return -ENOMEM;
	size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
	size_t part = PAGE_SIZE - offset % PAGE_SIZE;
	part = umin(part, umin(maxsize - extracted, fsize - offset));
	i->count -= part;
	i->iov_offset += part;
	if (i->iov_offset >= fsize) {
	i->iov_offset = 0;
	if (slot == folioq_nr_slots(folioq) && folioq->next) {
	folioq = folioq->next;
	i->folioq = folioq;
	i->folioq_slot = slot;

iov_iter_extract_xarray_pages():
	loff_t pos = i->xarray_start + i->iov_offset;
	XA_STATE(xas, i->xarray, pos >> PAGE_SHIFT);
	return -ENOMEM;
	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);

iov_iter_extract_bvec_pages():
	size_t skip = i->iov_offset, size = 0;
	if (i->nr_segs == 0)
	if (i->iov_offset == i->bvec->bv_len) {
	i->iov_offset = 0;
	i->nr_segs--;
	i->bvec++;
	while (bi.bi_size && bi.bi_idx < i->nr_segs) {
	struct bio_vec bv = bvec_iter_bvec(i->bvec, bi);
	bvec_iter_advance_single(i->bvec, &bi, bv.bv_len);

iov_iter_extract_kvec_pages():
	size_t skip = i->iov_offset, offset, len, size;
	if (i->nr_segs == 0)
	size = min(maxsize, i->kvec->iov_len - skip);
	i->iov_offset = 0;
	i->nr_segs--;
	i->kvec++;
	kaddr = i->kvec->iov_base + skip;
	return -ENOMEM;
	kaddr -= offset;
	len = offset + size;
	size_t seg = min_t(size_t, len, PAGE_SIZE);
	len -= seg;
	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);

iov_iter_extract_user_pages():
	* each of them. This should only be used if the iterator is user-backed
	if (i->data_source == ITER_DEST)
	if (i->nofault)
	return -ENOMEM;
	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);

iov_iter_extract_pages():
	* iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
	* @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
	* (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
	* It may also return -ENOMEM and -EFAULT.
	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
	return -EFAULT;
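Putting the extraction contract together: the caller may pass *pages == NULL to have a list allocated, and afterwards must drop pins (not references) if the iterator was user-backed. A sketch of one extract/release cycle (maxsize and maxpages are assumed context; the API calls are real):

	struct page **pages = NULL;
	size_t offset;
	ssize_t n;

	n = iov_iter_extract_pages(iter, &pages, maxsize, maxpages, 0, &offset);
	if (n < 0)
		return n;
	/* ... hand pages[0..] to the device ... */
	if (iov_iter_extract_will_pin(iter))
		unpin_user_pages(pages, DIV_ROUND_UP(offset + n, PAGE_SIZE));
	kvfree(pages);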