--- buffered-io.c (c441bfb5f2866de71e092c1b9d866a65978dfe1a)
+++ buffered-io.c (f0b65f39ac505e8f1dcdaa165aa7b8c0bd6fd454)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2010 Red Hat, Inc.
  * Copyright (C) 2016-2019 Christoph Hellwig.
  */
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/fs.h>

--- 380 unchanged lines hidden ---

  * It may allocate memory, but should avoid costly allocations. This
  * function is called with memalloc_nofs set, so allocations will not cause
  * the filesystem to be reentered.
  */
 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 {
     struct inode *inode = rac->mapping->host;
     loff_t pos = readahead_pos(rac);
-    size_t length = readahead_length(rac);
+    loff_t length = readahead_length(rac);
     struct iomap_readpage_ctx ctx = {
         .rac = rac,
     };

     trace_iomap_readahead(inode, readahead_count(rac));

     while (length > 0) {
-        ssize_t ret = iomap_apply(inode, pos, length, 0, ops,
+        loff_t ret = iomap_apply(inode, pos, length, 0, ops,
                 &ctx, iomap_readahead_actor);
         if (ret <= 0) {
             WARN_ON_ONCE(ret == 0);
             break;
         }
         pos += ret;
         length -= ret;
     }
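
A minimal sketch of how a filesystem typically wires iomap_readahead() into its
address_space_operations (modeled on the XFS pattern; the example_* names,
including example_read_iomap_ops, are illustrative rather than taken from this
file):

    /* The VFS invokes ->readahead with memalloc_nofs already in effect,
     * which is why the comment above can promise no filesystem reentry. */
    static void example_readahead(struct readahead_control *rac)
    {
        iomap_readahead(rac, &example_read_iomap_ops);
    }

    static const struct address_space_operations example_aops = {
        .readahead  = example_readahead,
        /* ... remaining operations ... */
    };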

--- 352 unchanged lines hidden ---

         if (bytes > length)
             bytes = length;

         /*
          * Bring in the user page that we will copy from _first_.
          * Otherwise there's a nasty deadlock on copying from the
          * same page as we're writing to, without it being marked
          * up-to-date.
-         *
-         * Not only is this an optimisation, but it is also required
-         * to check that the address is actually valid, when atomic
-         * usercopies are used, below.
          */
         if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
             status = -EFAULT;
             break;
         }
 
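The deadlock the comment above guards against arises when the source of the
copy is a mapping of the very page being written. A hedged userspace
illustration (hypothetical file name, error handling omitted):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("testfile", O_RDWR | O_CREAT, 0600);
        ftruncate(fd, 4096);
        char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        /* The copy source is backed by the same page the write targets.
         * Pre-faulting it in, as the code above does, keeps the write
         * path from taking a page fault on a page it has itself locked
         * and not yet marked up-to-date. */
        write(fd, map, 4096);
        return 0;
    }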
         status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
                 srcmap);
         if (unlikely(status))
             break;
 
         if (mapping_writably_mapped(inode->i_mapping))
             flush_dcache_page(page);
 
-        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+        copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 
-        copied = iomap_write_end(inode, pos, bytes, copied, page, iomap,
+        status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
                 srcmap);
 
-        cond_resched();
+        if (unlikely(copied != status))
+            iov_iter_revert(i, copied - status);
 
-        iov_iter_advance(i, copied);
-        if (unlikely(copied == 0)) {
+        cond_resched();
+        if (unlikely(status == 0)) {
             /*
-             * If we were unable to copy any data at all, we must
-             * fall back to a single segment length write.
-             *
-             * If we didn't fallback here, we could livelock
-             * because not all segments in the iov can be copied at
-             * once without a pagefault.
+             * A short copy made iomap_write_end() reject the
+             * thing entirely.  Might be memory poisoning
+             * halfway through, might be a race with munmap,
+             * might be severe memory pressure.
              */
-            bytes = min_t(unsigned long, PAGE_SIZE - offset,
-                          iov_iter_single_seg_count(i));
+            if (copied)
+                bytes = copied;
             goto again;
         }
-        pos += copied;
-        written += copied;
-        length -= copied;
+        pos += status;
+        written += status;
+        length -= status;
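
The new bookkeeping distinguishes copied (bytes actually pulled from the
iterator) from status (bytes iomap_write_end() accepted). A worked walkthrough
of the retry path above, with invented numbers:

    /*
     * Suppose bytes == 4096 and the atomic usercopy faults halfway:
     * copied == 2048, and the iterator has advanced by 2048. The page
     * was not up to date, so iomap_write_end() rejects the short copy
     * entirely and returns status == 0. iov_iter_revert(i, copied -
     * status) rewinds the iterator by 2048 so no data is silently
     * skipped, and the retry runs with bytes = copied == 2048,
     * covering only bytes already known to be resident. That
     * guarantees forward progress without the old
     * iov_iter_single_seg_count() fallback.
     */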
 
         balance_dirty_pages_ratelimited(inode->i_mapping);
     } while (iov_iter_count(i) && length);
 
     return written ? written : status;
 }
 
 ssize_t

--- 744 unchanged lines hidden ---