// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

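/*
 * Lazily allocate the per-folio state.  Nothing is needed for folios that
 * span a single filesystem block, so those never pay for the allocation.
 * __GFP_NOFAIL is used because the callers (the buffered read and
 * writeback paths) have no way to gracefully back out at this point.
 */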
static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (folio_test_uptodate(folio))
		bitmap_fill(iop->uptodate, nr_blocks);
	folio_attach_private(folio, iop);
	return iop;
}

static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}

/*
 * Calculate the range inside the folio that we actually need to read.
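 *
 * For example (a purely illustrative layout): with 1k blocks in a 4k
 * folio where the first two blocks are already uptodate, *pos advances
 * past them and the returned length shrinks to cover only the blocks
 * that still need to be read from disk.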
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the folio size, we need to check
	 * the per-block uptodate status and adjust the offset and length if
	 * needed to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (folio_test_error(folio))
		return;

	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

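/*
 * Read completion accounting: read_bytes_pending was bumped for every
 * sub-folio range submitted for I/O, so the folio is only unlocked once
 * the last outstanding range completes (or immediately, when no
 * iomap_page is attached and the folio covers a single block).
 */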
static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

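/*
 * State shared across one readpage or readahead pass: the folio currently
 * being filled, whether any part of it was added to a bio (i.e. real I/O
 * is in flight and will unlock it on completion), the bio under
 * construction, and the readahead control when called from ->readahead.
 */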
struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}

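/*
 * A block needs zeroing rather than a device read when there is no valid
 * data for it on disk: the source is not a mapped extent (e.g. a hole or
 * an unwritten extent), the extent was freshly allocated by this
 * operation (IOMAP_F_NEW), or the position is entirely beyond EOF.
 */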
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_page *iop;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	iop = iomap_page_create(iter->inode, folio);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, iop, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct folio *folio = page_folio(page);
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page, we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

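/*
 * A minimal caller sketch (hypothetical filesystem "foo" with an
 * iomap_ops instance named foo_iomap_ops; neither exists in this file):
 *
 *	static int foo_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &foo_iomap_ops);
 *	}
 *
 *	static void foo_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &foo_iomap_ops);
 *	}
 *
 * wired into struct address_space_operations as ->readpage and
 * ->readahead.
 */
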
/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!iop)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!test_bit(i, iop->uptodate))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct folio *folio = page_folio(page);

	trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
	iomap_page_release(folio);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
					folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	} else if (folio_test_large(folio)) {
		/* Must release the iop so the page can be split */
		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
			     folio_test_dirty(folio));
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct folio *folio = page_folio(page);
	struct folio *newfolio = page_folio(newpage);
	int ret;

	ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_private(folio))
		folio_attach_private(newfolio, folio_detach_private(folio));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

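/*
 * Synchronously read one sub-folio range during write_begin, using a
 * single on-stack bio with a single bio_vec.  This is only needed when a
 * write partially overwrites a block that is not yet uptodate.
 */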
static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

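/*
 * Bring the folio into a state where the copy-in can proceed: walk the
 * blocks covered by [pos, pos + len) and, for each one that is neither
 * uptodate nor going to be completely overwritten, either zero it (if
 * the block has no data on disk) or read it in synchronously.  This is
 * the read-modify-write half of a sub-folio-granularity buffered write.
 */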
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop = iomap_page_create(iter->inode, folio);
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
	if (!folio) {
		status = -ENOMEM;
		goto out_no_page;
	}
	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	folio_unlock(folio);
	folio_put(folio);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	folio_unlock(folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, &folio->page);
	folio_put(folio);

	if (ret < len)
		iomap_write_failed(iter->inode, pos, len);
	return ret;
}

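/*
 * Core buffered write loop: for each chunk, fault the source user pages
 * in, grab and prepare the target folio via iomap_write_begin(), copy the
 * data with the page atomically mapped, then finalize via
 * iomap_write_end().  Returns the number of bytes written for this
 * mapping, or the error encountered before anything was written.
 */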
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
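
/*
 * A hypothetical caller sketch (not part of this file): a filesystem's
 * ->write_iter typically holds the inode lock, runs the generic write
 * checks, and then hands the iov_iter to this helper:
 *
 *	inode_lock(inode);
 *	ret = generic_write_checks(iocb, from);
 *	if (ret > 0)
 *		ret = iomap_file_buffered_write(iocb, from, &foo_iomap_ops);
 *	inode_unlock(inode);
 *	if (ret > 0)
 *		ret = generic_write_sync(iocb, ret);
 *
 * where foo_iomap_ops is the filesystem's iomap_ops instance.
 */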

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);
	u32 folio_count = 0;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio) {
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
			folio_count++;
		}
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
	return folio_count;
}

/*
 * Ioend completion routine for merged bios. This can only be called from task
 * contexts as merged ioends can be of unbounded length. Hence we have to break
 * up the writeback completions into manageable chunks to avoid long scheduler
 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
 * good batch processing throughput without creating adverse scheduler latency
 * conditions.
 */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;
	u32 completions;

	might_sleep();

	list_replace_init(&ioend->io_list, &tmp);
	completions = iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		if (completions > IOEND_BATCH_SIZE * 8) {
			cond_resched();
			completions = 0;
		}
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		completions += iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	/*
	 * Do not merge physically discontiguous ioends. The filesystem
	 * completion functions will have to iterate the physical
	 * discontiguities even if we merge the ioends at a logical level, so
	 * we don't gain anything by merging physical discontiguities here.
	 *
	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
	 * submission so does not point to the start sector of the bio at
	 * completion.
	 */
	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we're failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

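/*
 * The ioend is allocated from iomap_ioend_bioset, which places the
 * struct iomap_ioend in front of the embedded bio (see iomap_init()
 * below), so the final bio_put() on io_inline_bio in
 * iomap_finish_ioend() frees the whole ioend.
 */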
static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
			       GFP_NOFS, &iomap_ioend_bioset);
	bio->bi_iter.bi_sector = sector;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_folios = 0;
	ioend->io_offset = offset;
	ioend->io_bio = bio;
	ioend->io_sector = sector;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
	bio_clone_blkg_association(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}

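/*
 * A block can only be appended to the cached ioend if it is contiguous
 * both logically (file offset) and physically (sector), carries the same
 * IOMAP_F_SHARED state and mapping type, and the ioend has not yet hit
 * the IOEND_BATCH_SIZE folio cap.
 */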
static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	/*
	 * Limit ioend bio chain lengths to minimise IO completion latency. This
	 * also prevents long tight loops ending page writeback on all the
	 * folios in the ioend.
	 */
	if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, pos);
	unsigned len = i_blocksize(inode);
	size_t poff = offset_in_folio(folio, pos);

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
	}

	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
	}

	if (iop)
		atomic_add(len, &iop->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, &folio->page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide. The current ioend we're
 * adding blocks to is cached in the writepage context, and if the new block
 * doesn't append to the cached ioend, it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct folio *folio, u64 end_pos)
{
	struct iomap_page *iop = iomap_page_create(inode, folio);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	u64 pos = folio_pos(folio);
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

	/*
	 * Walk through the folio to find areas to write back. If we
	 * run off the end of the current map or find the current map
	 * invalid, grab a new one.
	 */
	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, pos);
		if (error)
			break;
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
				 &submit_list);
		count++;
	}
	if (count)
		wpc->ioend->io_folios++;

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current page
		 * failed to map. If the page hasn't been added to ioend, it
		 * won't be affected by I/O completion and we must unlock it
		 * now.
		 */
		if (wpc->ops->discard_folio)
			wpc->ops->discard_folio(folio, pos);
		if (!count) {
			folio_clear_uptodate(folio);
			folio_unlock(folio);
			goto done;
		}
	}

	folio_start_writeback(folio);
	folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
	mapping_set_error(folio->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page, we need to allocate space and flush it.
 * For unwritten space on the page, we need to start the conversion to
 * regular allocated space.
 */
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
	struct folio *folio = page_folio(page);
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * The folio index is less than the end_index, adjust the end_pos
	 * to the highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the page if it's fully outside i_size, e.g. due to a
		 * truncate operation that's in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * iomap_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system then if we
		 * checked if the page is fully outside i_size with
		 * "if (page->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0.  Hence this page would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this operation
		 * would hang.  Instead, we can detect this situation by
		 * checking if the page is totally beyond i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		end_pos = isize;
	}

	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);
	return 0;
}

int
iomap_writepage(struct page *page, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = iomap_do_writepage(page, wbc, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepage);

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int			ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
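
/*
 * A hypothetical caller sketch (not part of this file): a filesystem's
 * ->writepages typically stack-allocates a context that embeds struct
 * iomap_writepage_ctx and hands it here together with its
 * iomap_writeback_ops:
 *
 *	static int foo_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&foo_writeback_ops);
 *	}
 */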

static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);