Lines Matching +full:line +full:- +full:orders
1 // SPDX-License-Identifier: GPL-2.0-only
4 #include <linux/fault-inject-usercopy.h>
78 * fault_in_iov_iter_readable - fault in iov iterator for reading
88 * Always returns 0 for non-userspace iterators.
94 n -= fault_in_readable(i->ubuf + i->iov_offset, n);
95 return size - n;
101 size -= count;
102 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
103 size_t len = min(count, p->iov_len - skip);
108 ret = fault_in_readable(p->iov_base + skip, len);
109 count -= len - ret;
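
A minimal usage sketch (not part of iov_iter.c): buffered-write paths pre-fault the source buffer and retry a short atomic copy, roughly as generic_perform_write() in mm/filemap.c does; page, offset, bytes and iter stand in for the caller's state here.

again:
	if (unlikely(fault_in_iov_iter_readable(&iter, bytes) == bytes))
		return -EFAULT;		/* not a single byte could be faulted in */
	copied = copy_page_from_iter_atomic(page, offset, bytes, &iter);
	if (unlikely(copied == 0))
		goto again;		/* page faulted out again: fault and retry */

A partial fault-in is fine: the copy makes progress on the resident bytes and the loop comes back for the rest.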
120 * fault_in_iov_iter_writeable - fault in iov iterator for writing
131 * Always returns 0 for non-userspace iterators.
137 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
138 return size - n;
144 size -= count;
145 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
146 size_t len = min(count, p->iov_len - skip);
151 ret = fault_in_safe_writeable(p->iov_base + skip, len);
152 count -= len - ret;
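
The writeable variant is the mirror image. A hedged sketch: a read path that must not take page faults while holding locks does a nofault read first, then faults the user destination in writeably (without corrupting data already there) and retries; do_nofault_read() is a hypothetical placeholder, not a real helper.

	size_t left = iov_iter_count(&iter);

	ret = do_nofault_read(&iter);			/* hypothetical helper */
	if (ret == -EFAULT &&
	    fault_in_iov_iter_writeable(&iter, left) != left)
		ret = do_nofault_read(&iter);		/* some bytes are resident now: retry */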
181 if (WARN_ON_ONCE(i->data_source))
211 * _copy_mc_to_iter - copy to iter with source memory error exception handling
217 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
218 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
224 * byte-by-byte until the fault happens again. Re-triggering machine
226 * alignment and poison alignment assumptions to avoid re-triggering
236 if (WARN_ON_ONCE(i->data_source))
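
Per the kernel-doc above, a short return means the copy hit poison (or a fault); a read(2)-style caller maps a completely failed copy to EIO and otherwise returns the bytes that made it. A hedged sketch, with kaddr, bytes and iter assumed in scope:

	size_t copied = _copy_mc_to_iter(kaddr, bytes, &iter);

	if (copied != bytes)			/* #MC or fault mid-copy */
		return copied ? copied : -EIO;	/* the read(2) semantics described above */
	return bytes;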
255 if (WARN_ON_ONCE(!i->data_source))
273 if (WARN_ON_ONCE(!i->data_source))
299 * _copy_from_iter_flushcache - write destination through cpu cache
304 * The pmem driver arranges for filesystem-dax to use this facility via
310 * instructions that strand dirty-data in the cache.
316 if (WARN_ON_ONCE(!i->data_source))
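
A hedged sketch of the pmem-style write path the comment describes: user data is copied into persistent memory through the flushcache variant so no dirty cache lines are left to strand data outside the persistence domain. Here kaddr (a kernel mapping of pmem), bytes and iter are assumptions.

	written = _copy_from_iter_flushcache(kaddr, bytes, &iter);
	if (written != bytes)
		return -EFAULT;		/* short copy from userspace */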
334 * However, we mostly deal with order-0 pages and thus can
335 * avoid a possible cache line miss for requests that fit all
336 * page orders.
342 v += (page - head) << PAGE_SHIFT;
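
The elided fast-path test the comment refers to is, reconstructed from the surrounding page_copy_sane(), essentially the following; n is the copy length and offset the offset into the page:

	size_t v = n + offset;

	/* order-0 fast path: the request fits one page, so the (possibly
	 * cache-cold) page order never has to be read; n <= v also rejects
	 * wraparound of the addition */
	if (n <= v && v <= PAGE_SIZE)
		return true;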
355 if (WARN_ON_ONCE(i->data_source))
361 size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
365 bytes -= n;
385 if (WARN_ON_ONCE(i->data_source))
391 size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
398 bytes -= n;
421 size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
425 bytes -= n;
465 if (!page_copy_sane(&folio->page, offset, bytes))
467 if (WARN_ON_ONCE(!i->data_source))
473 n = bytes - copied;
475 n > PAGE_SIZE - offset_in_page(offset))
476 n = PAGE_SIZE - offset_in_page(offset);
494 if (!i->count)
496 i->count -= size;
498 size += i->iov_offset;
500 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
501 if (likely(size < bvec->bv_len))
503 size -= bvec->bv_len;
505 i->iov_offset = size;
506 i->nr_segs -= bvec - i->bvec;
507 i->bvec = bvec;
514 if (!i->count)
516 i->count -= size;
518 size += i->iov_offset; // from beginning of current segment
519 for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
520 if (likely(size < iov->iov_len))
522 size -= iov->iov_len;
524 i->iov_offset = size;
525 i->nr_segs -= iov - iter_iov(i);
526 i->__iov = iov;
531 const struct folio_queue *folioq = i->folioq;
532 unsigned int slot = i->folioq_slot;
534 if (!i->count)
536 i->count -= size;
539 folioq = folioq->next;
543 size += i->iov_offset; /* From beginning of current segment. */
549 size -= fsize;
551 if (slot >= folioq_nr_slots(folioq) && folioq->next) {
552 folioq = folioq->next;
557 i->iov_offset = size;
558 i->folioq_slot = slot;
559 i->folioq = folioq;
564 if (unlikely(i->count < size))
565 size = i->count;
567 i->iov_offset += size;
568 i->count -= size;
577 i->count -= size;
584 const struct folio_queue *folioq = i->folioq;
585 unsigned int slot = i->folioq_slot;
591 folioq = folioq->prev;
594 slot--;
598 i->iov_offset = fsize - unroll;
601 unroll -= fsize;
604 i->folioq_slot = slot;
605 i->folioq = folioq;
614 i->count += unroll;
617 if (unroll <= i->iov_offset) {
618 i->iov_offset -= unroll;
621 unroll -= i->iov_offset;
628 const struct bio_vec *bvec = i->bvec;
630 size_t n = (--bvec)->bv_len;
631 i->nr_segs++;
633 i->bvec = bvec;
634 i->iov_offset = n - unroll;
637 unroll -= n;
640 i->iov_offset = 0;
645 size_t n = (--iov)->iov_len;
646 i->nr_segs++;
648 i->__iov = iov;
649 i->iov_offset = n - unroll;
652 unroll -= n;
663 if (i->nr_segs > 1) {
665 return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
667 return min(i->count, i->bvec->bv_len - i->iov_offset);
670 return !i->count ? 0 :
671 umin(folioq_folio_size(i->folioq, i->folioq_slot), i->count);
672 return i->count;
709 * iov_iter_folio_queue - Initialise an I/O iterator to use the folios in a folio queue
739 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
767 * iov_iter_discard - Initialise an I/O iterator that discards data
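
Hedged initialisation sketches for the three constructors above; fq, mapping, pos and count are assumed to exist in the caller:

	struct iov_iter iter;

	/* ITER_FOLIOQ: read into the folios queued on 'fq', from slot 0, offset 0 */
	iov_iter_folio_queue(&iter, ITER_DEST, fq, 0, 0, count);

	/* ITER_XARRAY: read into the pages a mapping caches at byte 'pos' */
	iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count);

	/* ITER_DISCARD: consume and throw away 'count' bytes */
	iov_iter_discard(&iter, ITER_DEST, count);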
791 size_t size = i->count;
792 size_t skip = i->iov_offset;
795 size_t len = iov->iov_len - skip;
797 res |= (unsigned long)iov->iov_base + skip;
801 size -= len;
811 const struct bio_vec *bvec = i->bvec;
813 size_t size = i->count;
814 unsigned skip = i->iov_offset;
817 size_t len = bvec->bv_len - skip;
818 res |= (unsigned long)bvec->bv_offset + skip;
823 size -= len;
833 size_t size = i->count;
835 return ((unsigned long)i->ubuf + i->iov_offset) | size;
848 return i->iov_offset | i->count;
850 return (i->xarray_start + i->iov_offset) | i->count;
860 size_t size = i->count;
869 for (k = 0; k < i->nr_segs; k++) {
871 if (iov->iov_len) {
872 unsigned long base = (unsigned long)iov->iov_base;
875 v = base + iov->iov_len;
876 if (size <= iov->iov_len)
878 size -= iov->iov_len;
905 const struct folio_queue *folioq = iter->folioq;
907 unsigned int slot = iter->folioq_slot;
908 size_t extracted = 0, count = iter->count, iov_offset = iter->iov_offset;
911 folioq = folioq->next;
914 return -EIO;
919 return -ENOMEM;
926 size_t part = PAGE_SIZE - offset % PAGE_SIZE;
929 part = umin(part, umin(maxsize - extracted, fsize - offset));
930 count -= part;
937 maxpages--;
946 if (slot == folioq_nr_slots(folioq) && folioq->next) {
947 folioq = folioq->next;
953 iter->count = count;
954 iter->iov_offset = iov_offset;
955 iter->folioq = folioq;
956 iter->folioq_slot = slot;
995 pos = i->xarray_start + i->iov_offset;
1002 return -ENOMEM;
1003 nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
1007 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1008 i->iov_offset += maxsize;
1009 i->count -= maxsize;
1013 /* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
1020 return (unsigned long)i->ubuf + i->iov_offset;
1022 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1024 size_t len = iov->iov_len - skip;
1030 return (unsigned long)iov->iov_base + skip;
1035 /* must be done on non-empty ITER_BVEC one */
1040 size_t skip = i->iov_offset, len;
1042 len = i->bvec->bv_len - skip;
1045 skip += i->bvec->bv_offset;
1046 page = i->bvec->bv_page + skip / PAGE_SIZE;
1057 if (maxsize > i->count)
1058 maxsize = i->count;
1070 if (i->nofault)
1078 return -ENOMEM;
1082 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1093 return -ENOMEM;
1101 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1102 i->count -= maxsize;
1103 i->iov_offset += maxsize;
1104 if (i->iov_offset == i->bvec->bv_len) {
1105 i->iov_offset = 0;
1106 i->bvec++;
1107 i->nr_segs--;
1115 return -EFAULT;
1147 size_t skip = i->iov_offset, size = i->count;
1152 unsigned offs = offset_in_page(p->iov_base + skip);
1153 size_t len = min(p->iov_len - skip, size);
1156 size -= len;
1167 size_t skip = i->iov_offset, size = i->count;
1171 for (p = i->bvec; size; skip = 0, p++) {
1172 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1173 size_t len = min(p->bv_len - skip, size);
1175 size -= len;
1185 if (unlikely(!i->count))
1188 unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
1189 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
1198 unsigned offset = i->iov_offset % PAGE_SIZE;
1199 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1203 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1204 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1215 return new->bvec = kmemdup(new->bvec,
1216 new->nr_segs * sizeof(struct bio_vec),
1220 return new->__iov = kmemdup(new->__iov,
1221 new->nr_segs * sizeof(struct iovec),
1232 int ret = -EFAULT;
1236 return -EFAULT;
1247 ret = -EINVAL;
1263 int ret = -EFAULT;
1266 return -EFAULT;
1272 unsafe_get_user(len, &uiov->iov_len, uaccess_end);
1273 unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
1277 ret = -EINVAL;
1280 iov->iov_base = buf;
1281 iov->iov_len = len;
1284 } while (--nr_segs);
1307 return ERR_PTR(-EINVAL);
1311 return ERR_PTR(-ENOMEM);
1346 ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
1349 return i->count;
1384 return -EFAULT;
1387 if (len > MAX_RW_COUNT - total_len) {
1388 len = MAX_RW_COUNT - total_len;
1403 * import_iovec() - Copy an array of &struct iovec from userspace
1412 * on-stack) kernel array.
1419 * on-stack array was used or not (and regardless of whether this function
1438 return -EFAULT;
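
The canonical caller pattern (compare the readv/writev paths in fs/read_write.c), with the unconditional kfree() the kernel-doc above requires:

	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(ITER_SOURCE, uvec, nr_segs,
			   ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret >= 0) {
		/* ... consume 'iter'; 'ret' holds the total byte count ... */
	}
	kfree(iov);	/* required either way; *iov is NULL when the stack array was used */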
1446 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1462 i->iov_offset = state->iov_offset;
1463 i->count = state->count;
1467 * For the *vec iters, nr_segs + iov is constant - if we increment
1477 i->bvec -= state->nr_segs - i->nr_segs;
1479 i->__iov -= state->nr_segs - i->nr_segs;
1480 i->nr_segs = state->nr_segs;
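
iov_iter_restore() pairs with iov_iter_save_state(): snapshot the iterator before an operation that may consume it, and rewind on -EAGAIN, roughly as io_uring does when rearming a request. A sketch, with file, kiocb and iter assumed in scope:

	struct iov_iter_state state;

	iov_iter_save_state(&iter, &state);
	ret = call_read_iter(file, &kiocb, &iter);	/* may advance the iterator */
	if (ret == -EAGAIN)
		iov_iter_restore(&iter, &state);	/* rewind before resubmitting */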
1493 const struct folio_queue *folioq = i->folioq;
1496 size_t extracted = 0, offset, slot = i->folioq_slot;
1499 folioq = folioq->next;
1501 if (WARN_ON(i->iov_offset != 0))
1502 return -EIO;
1505 offset = i->iov_offset & ~PAGE_MASK;
1510 return -ENOMEM;
1515 size_t offset = i->iov_offset, fsize = folioq_folio_size(folioq, slot);
1516 size_t part = PAGE_SIZE - offset % PAGE_SIZE;
1519 part = umin(part, umin(maxsize - extracted, fsize - offset));
1520 i->count -= part;
1521 i->iov_offset += part;
1530 if (i->iov_offset >= fsize) {
1531 i->iov_offset = 0;
1533 if (slot == folioq_nr_slots(folioq) && folioq->next) {
1534 folioq = folioq->next;
1540 i->folioq = folioq;
1541 i->folioq_slot = slot;
1558 loff_t pos = i->xarray_start + i->iov_offset;
1559 XA_STATE(xas, i->xarray, pos >> PAGE_SHIFT);
1566 return -ENOMEM;
1586 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1601 size_t skip = i->iov_offset, size = 0;
1605 if (i->nr_segs == 0)
1608 if (i->iov_offset == i->bvec->bv_len) {
1609 i->iov_offset = 0;
1610 i->nr_segs--;
1611 i->bvec++;
1620 while (bi.bi_size && bi.bi_idx < i->nr_segs) {
1621 struct bio_vec bv = bvec_iter_bvec(i->bvec, bi);
1651 bvec_iter_advance_single(i->bvec, &bi, bv.bv_len);
1670 size_t skip = i->iov_offset, offset, len, size;
1674 if (i->nr_segs == 0)
1676 size = min(maxsize, i->kvec->iov_len - skip);
1679 i->iov_offset = 0;
1680 i->nr_segs--;
1681 i->kvec++;
1685 kaddr = i->kvec->iov_base + skip;
1691 return -ENOMEM;
1694 kaddr -= offset;
1705 len -= seg;
1709 size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
1716 * each of them. This should only be used if the iterator is user-backed
1738 if (i->data_source == ITER_DEST)
1742 if (i->nofault)
1750 return -ENOMEM;
1754 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
1760 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
1777 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
1785 * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
1800 * It may also return -ENOMEM and -EFAULT.
1809 maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
1833 return -EFAULT;
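
A hedged end-to-end sketch of the extraction API documented above; maxsize and the page-array cap of 32 are arbitrary choices, and cleanup uses iov_iter_extract_will_pin() to decide whether the FOLL_PIN references must be dropped:

	struct page **pages = NULL;	/* NULL: let the call allocate the array */
	size_t offset;
	ssize_t len;

	len = iov_iter_extract_pages(&iter, &pages, maxsize, 32, 0, &offset);
	if (len < 0)
		return len;

	/* ... hand the pages, starting 'offset' bytes into pages[0], to DMA ... */

	if (iov_iter_extract_will_pin(&iter))
		unpin_user_pages(pages, DIV_ROUND_UP(offset + len, PAGE_SIZE));
	kvfree(pages);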