// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_iovec(i))) {			\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

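/*
 * Illustrative sketch, not kernel API: the exported copy routines below are
 * all thin wrappers around iterate_and_advance().  The "I" argument is the
 * per-segment step for user-backed segments and must evaluate to the number
 * of bytes it failed to process; the "K" argument handles kernel-backed
 * segments (kvec/bvec/xarray) and its value is discarded by the non-__
 * variant.  A hypothetical helper that copies into a kernel buffer "buf"
 * (name invented for illustration) would look roughly like:
 *
 *	static size_t grab_from_iter(void *buf, size_t bytes, struct iov_iter *i)
 *	{
 *		if (iter_is_iovec(i))
 *			might_fault();
 *		iterate_and_advance(i, bytes, base, len, off,
 *			copyin(buf + off, base, len),
 *			memcpy(buf + off, base, len)
 *		)
 *		return bytes;
 *	}
 *
 * which is essentially what _copy_from_iter() below does.
 */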
static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	buf->flags = 0;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

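/*
 * Illustrative sketch, not kernel API: a typical caller pairs the fault-in
 * helpers above with a copy that runs with page faults disabled, and retries
 * when the fault-in comes up completely short.  Roughly (surrounding names
 * are invented for illustration):
 *
 *	size_t want = iov_iter_count(iter);
 *
 *	if (fault_in_iov_iter_readable(iter, want) == want)
 *		return -EFAULT;		// nothing could be faulted in
 *	// ... take locks, copy with faults disabled, and repeat the
 *	// fault-in/copy sequence if the copy itself comes up short ...
 *
 * See the write-loop sketch further down, next to
 * copy_page_from_iter_atomic(), for the full pattern.
 */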
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->flags = 0;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int i_head;
	size_t r;

	if (!sanity(i))
		return 0;

	bytes = push_pipe(i, bytes, &i_head, &r);
	while (bytes) {
		size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		bytes -= chunk;
		off += chunk;
		r = 0;
		i_head++;
	}
	*sump = sum;
	i->count -= off;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

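/*
 * Illustrative sketch, not kernel API: copying a kernel buffer out through an
 * ITER_KVEC destination.  "src", "src_len", "dst" and "dst_len" are invented
 * names for illustration:
 *
 *	struct kvec kv = { .iov_base = dst, .iov_len = dst_len };
 *	struct iov_iter iter;
 *	size_t copied;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, dst_len);
 *	copied = copy_to_iter(src, src_len, &iter);
 *
 * The return value is clamped to the space left in the iterator; for
 * kernel-backed segments the copy itself cannot fault, so only ITER_IOVEC
 * destinations can legitimately come up short.
 */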
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	n = push_pipe(i, bytes, &i_head, &off);
	while (n) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		unsigned long rem;
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		xfer += chunk;
		if (rem)
			break;
		n -= chunk;
		off = 0;
		i_head++;
	}
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer).  Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

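/*
 * Illustrative sketch, not kernel API: on CONFIG_ARCH_HAS_COPY_MC
 * architectures, a dax-style read path can hand poisoned-memory handling to
 * _copy_mc_to_iter() and report a short read instead of re-consuming the
 * machine check.  "pmem_addr" and "len" are invented names:
 *
 *	size_t done = _copy_mc_to_iter(pmem_addr, len, iter);
 *
 *	if (done != len)
 *		return done ? done : -EIO;	// poison hit, stop early
 *	return done;
 */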
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types.  The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (likely(iter_is_iovec(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	if (iov_iter_is_pipe(i))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	if (unlikely(iov_iter_is_discard(i))) {
		if (unlikely(i->count < bytes))
			bytes = i->count;
		i->count -= bytes;
		return bytes;
	}
	WARN_ON(1);
	return 0;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

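/*
 * Illustrative sketch, not kernel API: a read-style path that has a page of
 * data and an arbitrary destination iterator (user iovec, kvec, bvec, pipe
 * or xarray) can hand both to copy_page_to_iter(); the iterator is advanced
 * internally and the helper returns how much was actually copied.  "pos" and
 * "page" are invented names:
 *
 *	size_t off = pos & (PAGE_SIZE - 1);
 *	size_t want = min_t(size_t, PAGE_SIZE - off, iov_iter_count(iter));
 *	size_t done = copy_page_to_iter(page, off, want, iter);
 *
 *	pos += done;
 *	if (done < want)
 *		return -EFAULT;		// destination went away mid-copy
 */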
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (likely(iter_is_iovec(i)))
		return copy_page_from_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	WARN_ON(1);
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

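/*
 * Illustrative sketch, not kernel API: the buffered-write pattern that ties
 * fault_in_iov_iter_readable() and copy_page_from_iter_atomic() together.
 * The atomic copy runs under kmap_atomic(), i.e. with page faults disabled,
 * so a zero-length copy means "fault the user pages in and try again".  All
 * surrounding names are invented:
 *
 *	again:
 *		if (fault_in_iov_iter_readable(iter, bytes) == bytes)
 *			return -EFAULT;
 *		// ... find and lock the pagecache page for this range ...
 *		copied = copy_page_from_iter_atomic(page, off, bytes, iter);
 *		// ... unlock/release the page, account for "copied" ...
 *		if (unlikely(copied == 0))
 *			goto again;	// raced with the pages going away
 */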
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	struct bvec_iter bi;

	bi.bi_size = i->count;
	bi.bi_bvec_done = i->iov_offset;
	bi.bi_idx = 0;
	bvec_iter_advance(i->bvec, &bi, size);

	i->bvec += bi.bi_idx;
	i->nr_segs -= bi.bi_idx;
	i->count = bi.bi_size;
	i->iov_offset = bi.bi_bvec_done;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

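/*
 * Illustrative sketch, not kernel API: the constructors above are how kernel
 * subsystems describe the memory an I/O will touch.  For example, wrapping a
 * single page fragment so it can feed the copy helpers (names invented for
 * illustration):
 *
 *	struct bio_vec bv = {
 *		.bv_page	= page,
 *		.bv_offset	= frag_off,
 *		.bv_len		= frag_len,
 *	};
 *	struct iov_iter iter;
 *
 *	iov_iter_bvec(&iter, WRITE, &bv, 1, frag_len);
 *	// "iter" now acts as the source of a write: pass it to
 *	// copy_from_iter(), csum_and_copy_from_iter(), and so on.
 */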
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		unsigned int p_mask = i->pipe->ring_size - 1;
		size_t size = i->count;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

/* must be done on non-empty ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i,
					 size_t *size, size_t *start,
					 size_t maxsize, unsigned maxpages)
{
	size_t skip;
	long k;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (len > maxsize)
			len = maxsize;
		len += (*start = addr % PAGE_SIZE);
		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		*size = len;
		return addr & PAGE_MASK;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start,
				       size_t maxsize, unsigned maxpages)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (len > maxsize)
		len = maxsize;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	len += (*start = skip % PAGE_SIZE);
	if (len > maxpages * PAGE_SIZE)
		len = maxpages * PAGE_SIZE;
	*size = len;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize, maxpages);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		while (n--)
			get_page(*pages++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {
			kvfree(p);
			*pages = NULL;
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize, ~0U);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		while (n--)
			get_page(*p++ = page++);
		return len - *start;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

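/*
 * Illustrative sketch, not kernel API: a zero-copy path (direct I/O, RDMA and
 * the like) pins the pages behind the iterator instead of copying.  The error
 * handling below is simplified and the names are invented:
 *
 *	struct page **pages;
 *	size_t off;
 *	ssize_t got;
 *
 *	got = iov_iter_get_pages_alloc(iter, &pages, want, &off);
 *	if (got <= 0)
 *		return got ? got : -EFAULT;
 *	iov_iter_advance(iter, got);
 *	// ... build a bio/sg list over the pinned pages starting at "off",
 *	// then drop the page references and kvfree(pages) when done.
 */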
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

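/*
 * Illustrative sketch, not kernel API: the usual way a readv/writev-style
 * handler consumes a userspace iovec array.  "uvec" and "nr" stand for the
 * syscall arguments; everything else is invented for illustration:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr, ARRAY_SIZE(iovstack), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... perform the I/O against &iter ...
 *	kfree(iov);	// safe whether or not the on-stack array was used
 */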
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
			 !iov_iter_is_kvec(i))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
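/*
 * Illustrative sketch, not kernel API: iov_iter_save_state() (declared in
 * <linux/uio.h>) and iov_iter_restore() let a caller retry an operation that
 * may have partially consumed the iterator.  All surrounding names are
 * invented:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_one_attempt(iter);
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);	// rewind to the saved point
 *		// queue the request for another attempt
 *	}
 */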