// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2023 Christoph Hellwig.
 */
#include <linux/iomap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/fserror.h>
#include "internal.h"
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	spinlock_t		state_lock;
	unsigned int		read_bytes_pending;
	atomic_t		write_bytes_pending;
	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) hold the uptodate status.
	 * Bits [blocks_per_folio..2 * blocks_per_folio) hold the dirty status.
	 */
	unsigned long		state[];
};
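
/*
 * Layout example (illustrative only, not part of the build): on a 16k
 * folio over 4k filesystem blocks, blocks_per_folio is 4 and state[]
 * spans 8 bits.  Querying block 2 of each set then amounts to:
 *
 *	unsigned int nblks = i_blocks_per_folio(inode, folio);
 *
 *	bool uptodate = test_bit(2, ifs->state);
 *	bool dirty = test_bit(nblks + 2, ifs->state);
 *
 * The real accesses below go through the bitmap helpers under
 * state_lock, but all of them follow this layout: bit n for uptodate,
 * bit blocks_per_folio + n for dirty.
 */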

static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

/*
 * Find the next uptodate block in the folio. end_blk is inclusive.
 * If no uptodate block is found, this will return end_blk + 1.
 */
static unsigned ifs_next_uptodate_block(struct folio *folio,
		unsigned start_blk, unsigned end_blk)
{
	struct iomap_folio_state *ifs = folio->private;

	return find_next_bit(ifs->state, end_blk + 1, start_blk);
}

/*
 * Find the next non-uptodate block in the folio. end_blk is inclusive.
 * If no non-uptodate block is found, this will return end_blk + 1.
 */
static unsigned ifs_next_nonuptodate_block(struct folio *folio,
		unsigned start_blk, unsigned end_blk)
{
	struct iomap_folio_state *ifs = folio->private;

	return find_next_zero_bit(ifs->state, end_blk + 1, start_blk);
}

static bool ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;

	bitmap_set(ifs->state, first_blk, nr_blks);
	return ifs_is_fully_uptodate(folio, ifs);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned long flags;
	bool uptodate = true;

	if (folio_test_uptodate(folio))
		return;

	if (ifs) {
		spin_lock_irqsave(&ifs->state_lock, flags);
		uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (uptodate)
		folio_mark_uptodate(folio);
}

/*
 * Find the next dirty block in the folio. end_blk is inclusive.
 * If no dirty block is found, this will return end_blk + 1.
 */
static unsigned ifs_next_dirty_block(struct folio *folio,
		unsigned start_blk, unsigned end_blk)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned int blks = i_blocks_per_folio(inode, folio);

	return find_next_bit(ifs->state, blks + end_blk + 1,
			blks + start_blk) - blks;
}

/*
 * Find the next clean block in the folio. end_blk is inclusive.
 * If no clean block is found, this will return end_blk + 1.
 */
static unsigned ifs_next_clean_block(struct folio *folio,
		unsigned start_blk, unsigned end_blk)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned int blks = i_blocks_per_folio(inode, folio);

	return find_next_zero_bit(ifs->state, blks + end_blk + 1,
			blks + start_blk) - blks;
}

static unsigned ifs_find_dirty_range(struct folio *folio,
		struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
{
	struct inode *inode = folio->mapping->host;
	unsigned start_blk =
		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
	unsigned end_blk = min_not_zero(
		offset_in_folio(folio, range_end) >> inode->i_blkbits,
		i_blocks_per_folio(inode, folio)) - 1;
	unsigned nblks;

	start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
	if (start_blk > end_blk)
		return 0;
	if (start_blk == end_blk)
		nblks = 1;
	else
		nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
				start_blk;

	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
	return nblks << inode->i_blkbits;
}

static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
		u64 range_end)
{
	struct iomap_folio_state *ifs = folio->private;

	if (*range_start >= range_end)
		return 0;

	if (ifs)
		return ifs_find_dirty_range(folio, ifs, range_start, range_end);
	return range_end - *range_start;
}
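
/*
 * Worked example (illustrative): for a 16k folio at pos 0 with 4k blocks
 * where only blocks 1 and 2 are dirty, a call with *range_start = 0 and
 * range_end = 16384 computes:
 *
 *	start_blk = 0, end_blk = 3
 *	ifs_next_dirty_block(folio, 0, 3) -> 1
 *	ifs_next_clean_block(folio, 2, 3) -> 3, so nblks = 3 - 1 = 2
 *	*range_start = 4096, return value = 8192
 *
 * i.e. the caller is handed the dirty byte range [4096, 12288) and
 * repeats with *range_start = 12288, which then returns 0.
 */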

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc_flex(*ifs, state, BITS_TO_LONGS(2 * nr_blocks), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}
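
/*
 * Sizing sketch (illustrative): for 4k blocks in a 64k folio, nr_blocks
 * is 16 and the flexible state[] array needs BITS_TO_LONGS(2 * 16) = 1
 * long on a 64-bit machine: 16 uptodate bits followed by 16 dirty bits.
 * A folio that is already uptodate or dirty as a whole starts out with
 * the corresponding half of the bitmap filled, so the per-block state
 * agrees with the folio flags from the beginning.
 */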

static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(ifs->read_bytes_pending != 0);
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate how many bytes to truncate based on the number of blocks to
 * truncate and the end position to start truncating from.
 */
static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits,
		unsigned blocks_truncated)
{
	unsigned block_size = 1 << block_bits;
	unsigned block_offset = end_pos & (block_size - 1);

	if (!block_offset)
		return blocks_truncated << block_bits;

	return ((blocks_truncated - 1) << block_bits) + block_offset;
}
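
/*
 * Worked example (illustrative): with 4k blocks (block_bits = 12), an
 * end_pos of 9728 and blocks_truncated = 2:
 *
 *	block_offset = 9728 & 4095 = 1536
 *	return ((2 - 1) << 12) + 1536 = 5632
 *
 * i.e. one whole trailing block plus the 1536 bytes contributed by the
 * partially covered final block.  With a block-aligned end_pos the
 * result is simply blocks_truncated << block_bits.
 */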

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	size_t orig_plen = plen;
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int next, blocks_skipped;

		next = ifs_next_nonuptodate_block(folio, first, last);
		blocks_skipped = next - first;

		if (blocks_skipped) {
			unsigned long block_offset = *pos & (block_size - 1);
			unsigned bytes_skipped =
				(blocks_skipped << block_bits) - block_offset;

			*pos += bytes_skipped;
			poff += bytes_skipped;
			plen -= bytes_skipped;
		}
		first = next;

		/* truncate len if we find any trailing uptodate block(s) */
		if (++next <= last) {
			next = ifs_next_uptodate_block(folio, next, last);
			if (next <= last) {
				plen -= iomap_bytes_to_truncate(*pos + plen,
						block_bits, last - next + 1);
				last = next - 1;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= iomap_bytes_to_truncate(*pos + plen, block_bits,
					last - end);
	}

	*offp = poff;
	*lenp = plen;
}
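
/*
 * Worked example (illustrative): reading a full 16k folio (4k blocks,
 * pos at the folio start) where blocks 0 and 3 are already uptodate:
 *
 *	first = 0, last = 3
 *	leading trim:	skip to block 1, *pos += 4096, poff = 4096,
 *			plen = 12288
 *	trailing trim:	block 3 is uptodate, plen -= 4096 -> 8192
 *
 * leaving only the byte range [4096, 12288) of the folio to read.  Note
 * that uptodate blocks in the middle of the range are not skipped; only
 * the leading and trailing uptodate runs are trimmed.
 */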

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t offset = offset_in_folio(folio, iomap->offset);

	if (WARN_ON_ONCE(!iomap->inline_data))
		return -EIO;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > iomap->length)) {
		fserror_report_io(iter->inode, FSERR_BUFFERED_READ,
				  iomap->offset, size, -EIO, GFP_NOFS);
		return -EIO;
	}
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	folio_fill_tail(folio, offset, iomap->inline_data, size);
	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
	return 0;
}

void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
		int error)
{
	struct iomap_folio_state *ifs = folio->private;
	bool uptodate = !error;
	bool finished = true;

	if (ifs) {
		unsigned long flags;

		spin_lock_irqsave(&ifs->state_lock, flags);
		if (!error)
			uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		ifs->read_bytes_pending -= len;
		finished = !ifs->read_bytes_pending;
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (error)
		fserror_report_io(folio->mapping->host, FSERR_BUFFERED_READ,
				  folio_pos(folio) + off, len, error,
				  GFP_ATOMIC);

	if (finished)
		folio_end_read(folio, uptodate);
}
EXPORT_SYMBOL_GPL(iomap_finish_folio_read);

static void iomap_read_init(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs) {
		/*
		 * ifs->read_bytes_pending is used to track how many bytes are
		 * read in asynchronously by the IO helper. We need to track
		 * this so that we can know when the IO helper has finished
		 * reading in all the necessary ranges of the folio and can end
		 * the read.
		 *
		 * Increase ->read_bytes_pending by the folio size to start.
		 * We'll subtract any uptodate / zeroed ranges that did not
		 * require IO in iomap_read_end() after we're done processing
		 * the folio.
		 *
		 * We do this because otherwise, we would have to increment
		 * ifs->read_bytes_pending every time a range in the folio needs
		 * to be read in, which can get expensive since the spinlock
		 * needs to be held whenever modifying ifs->read_bytes_pending.
		 */
		spin_lock_irq(&ifs->state_lock);
		WARN_ON_ONCE(ifs->read_bytes_pending != 0);
		ifs->read_bytes_pending = folio_size(folio);
		spin_unlock_irq(&ifs->state_lock);
	}
}

/*
 * This ends IO if no bytes were submitted to an IO helper.
 *
 * Otherwise, this calibrates ifs->read_bytes_pending to represent only the
 * submitted bytes (see comment in iomap_read_init()). If all bytes submitted
 * have already been completed by the IO helper, then this will end the read.
 * Else the IO helper will end the read after all submitted ranges have been
 * read.
 */
static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs) {
		bool end_read, uptodate;

		spin_lock_irq(&ifs->state_lock);
		if (!ifs->read_bytes_pending) {
			WARN_ON_ONCE(bytes_submitted);
			spin_unlock_irq(&ifs->state_lock);
			folio_unlock(folio);
			return;
		}

		/*
		 * Subtract any bytes that were initially accounted to
		 * read_bytes_pending but skipped for IO.
		 */
		ifs->read_bytes_pending -= folio_size(folio) - bytes_submitted;

		/*
		 * If !ifs->read_bytes_pending, this means all pending reads by
		 * the IO helper have already completed, which means we need to
		 * end the folio read here. If ifs->read_bytes_pending != 0,
		 * the IO helper will end the folio read.
		 */
		end_read = !ifs->read_bytes_pending;
		if (end_read)
			uptodate = ifs_is_fully_uptodate(folio, ifs);
		spin_unlock_irq(&ifs->state_lock);
		if (end_read)
			folio_end_read(folio, uptodate);
	} else {
		/*
		 * If a folio without an ifs is submitted to the IO helper, the
		 * read must be on the entire folio and the IO helper takes
		 * ownership of the folio. This means we should only enter
		 * iomap_read_end() for the !ifs case if no bytes were submitted
		 * to the IO helper, in which case we are responsible for
		 * unlocking the folio here.
		 */
		WARN_ON_ONCE(bytes_submitted);
		folio_unlock(folio);
	}
}
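
/*
 * Worked accounting example (illustrative): for a 16k folio where one 4k
 * range needs I/O and the remaining 12k is satisfied by uptodate/zeroed
 * blocks:
 *
 *	iomap_read_init():	read_bytes_pending = 16384
 *	submit 4k range:	bytes_submitted = 4096
 *	iomap_read_end():	read_bytes_pending -= 16384 - 4096 -> 4096
 *	I/O completion:		read_bytes_pending -= 4096 -> 0, ends read
 *
 * If the completion runs before iomap_read_end(), the subtraction in
 * iomap_read_end() drops the counter to zero instead and it ends the
 * read itself.  Either way exactly one side observes the count hit zero.
 */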

static int iomap_read_folio_iter(struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t *bytes_submitted)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	struct folio *folio = ctx->cur_folio;
	size_t folio_len = folio_size(folio);
	size_t poff, plen;
	loff_t pos_diff;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_read_inline_data(iter, folio);
		if (ret)
			return ret;
		return iomap_iter_advance(iter, length);
	}

	ifs_alloc(iter->inode, folio, iter->flags);

	length = min_t(loff_t, length, folio_len - offset_in_folio(folio, pos));
	while (length) {
		iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
				&plen);

		pos_diff = pos - iter->pos;
		if (WARN_ON_ONCE(pos_diff + plen > length))
			return -EIO;

		ret = iomap_iter_advance(iter, pos_diff);
		if (ret)
			return ret;

		if (plen == 0)
			return 0;

		/* zero post-eof blocks as the page may be mapped */
		if (iomap_block_needs_zeroing(iter, pos)) {
			folio_zero_range(folio, poff, plen);
			iomap_set_range_uptodate(folio, poff, plen);
		} else {
			if (!*bytes_submitted)
				iomap_read_init(folio);
			ret = ctx->ops->read_folio_range(iter, ctx, plen);
			if (ret < 0)
				fserror_report_io(iter->inode,
						  FSERR_BUFFERED_READ, pos,
						  plen, ret, GFP_NOFS);
			if (ret)
				return ret;

			*bytes_submitted += plen;
			/*
			 * If the entire folio has been read in by the IO
			 * helper, then the helper owns the folio and will end
			 * the read on it.
			 */
			if (*bytes_submitted == folio_len)
				ctx->cur_folio = NULL;
		}

		ret = iomap_iter_advance(iter, plen);
		if (ret)
			return ret;
		length -= pos_diff + plen;
		pos = iter->pos;
	}
	return 0;
}

void iomap_read_folio(const struct iomap_ops *ops,
		struct iomap_read_folio_ctx *ctx, void *private)
{
	struct folio *folio = ctx->cur_folio;
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
		.private	= private,
	};
	size_t bytes_submitted = 0;
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_read_folio_iter(&iter, ctx,
				&bytes_submitted);

	if (ctx->ops->submit_read)
		ctx->ops->submit_read(ctx);

	if (ctx->cur_folio)
		iomap_read_end(ctx->cur_folio, bytes_submitted);
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

static int iomap_readahead_iter(struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t *cur_bytes_submitted)
{
	int ret;

	while (iomap_length(iter)) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
			iomap_read_end(ctx->cur_folio, *cur_bytes_submitted);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			if (WARN_ON_ONCE(!ctx->cur_folio))
				return -EINVAL;
			*cur_bytes_submitted = 0;
		}
		ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @ops: The operations vector for the filesystem.
 * @ctx: The ctx used for issuing readahead.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(const struct iomap_ops *ops,
		struct iomap_read_folio_ctx *ctx, void *private)
{
	struct readahead_control *rac = ctx->rac;
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
		.private = private,
	};
	size_t cur_bytes_submitted = 0;

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.status = iomap_readahead_iter(&iter, ctx,
					&cur_bytes_submitted);

	if (ctx->ops->submit_read)
		ctx->ops->submit_read(ctx);

	if (ctx->cur_folio)
		iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
}
EXPORT_SYMBOL_GPL(iomap_readahead);
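
/*
 * Usage sketch (hypothetical, for orientation only): a filesystem using
 * the bio-based read helpers would wire these entry points into its
 * address_space operations roughly as follows, where myfs_iomap_ops and
 * iomap_bio_read_ops are stand-in names for the fs mapping ops and the
 * ctx ops provided by the iomap bio code:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		struct iomap_read_folio_ctx ctx = {
 *			.ops		= &iomap_bio_read_ops,
 *			.cur_folio	= folio,
 *		};
 *
 *		iomap_read_folio(&myfs_iomap_ops, &ctx, NULL);
 *		return 0;
 *	}
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		struct iomap_read_folio_ctx ctx = {
 *			.ops	= &iomap_bio_read_ops,
 *			.rac	= rac,
 *		};
 *
 *		iomap_readahead(&myfs_iomap_ops, &ctx, NULL);
 *	}
 */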

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	return ifs_next_nonuptodate_block(folio, first, last) > last;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	if (iter->flags & IOMAP_DONTCACHE)
		fgp |= FGP_DONTCACHE;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
					folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int __iomap_write_begin(const struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t len,
		struct folio *folio)
{
	struct iomap_folio_state *ifs;
	loff_t pos = iter->pos;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * the entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_next_pos(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		/*
		 * If the read range will be entirely overwritten by the write,
		 * we can skip having to zero/read it in.
		 */
		if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
		    to >= poff + plen)
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			if (write_ops && write_ops->read_folio_range)
				status = write_ops->read_folio_range(iter,
						folio, block_start, plen);
			else
				status = iomap_bio_read_folio_range_sync(iter,
						folio, block_start, plen);
			if (status < 0)
				fserror_report_io(iter->inode,
						  FSERR_BUFFERED_READ, pos,
						  len, status, GFP_NOFS);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t len)
{
	loff_t pos = iter->pos;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
		struct folio *folio = folio_batch_next(iter->fbatch);

		if (!folio)
			return NULL;

		/*
		 * The folio mapping generally shouldn't have changed based on
		 * fs locks, but be consistent with filemap lookup and retry
		 * the iter if it does.
		 */
		folio_lock(folio);
		if (unlikely(folio->mapping != iter->inode->i_mapping)) {
			iter->iomap.flags |= IOMAP_F_STALE;
			folio_unlock(folio);
			return NULL;
		}

		folio_get(folio);
		folio_wait_stable(folio);
		return folio;
	}

	if (write_ops && write_ops->get_folio)
		return write_ops->get_folio(iter, pos, len);
	return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, size_t ret,
		struct folio *folio)
{
	loff_t pos = iter->pos;

	if (write_ops && write_ops->put_folio) {
		write_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

/* trim pos and bytes to within a given folio */
static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
		struct folio *folio, size_t *offset, u64 *bytes)
{
	loff_t pos = iter->pos;
	size_t fsize = folio_size(folio);

	WARN_ON_ONCE(pos < folio_pos(folio));
	WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);

	*offset = offset_in_folio(folio, pos);
	*bytes = min(*bytes, fsize - *offset);

	return pos;
}
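
/*
 * Worked example (illustrative): for a 16k folio at folio_pos() 16384
 * with iter->pos = 20480 and an incoming *bytes of 32768:
 *
 *	*offset = 20480 - 16384 = 4096
 *	*bytes  = min(32768, 16384 - 4096) = 12288
 *
 * so the caller operates on folio bytes [4096, 16384) and picks up the
 * rest of the range in its next iteration.
 */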

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

/*
 * Grab and prepare a folio for write based on iter state. Returns the folio,
 * offset, and length. Callers can optionally pass a max length *plen,
 * otherwise init to zero.
 */
static int iomap_write_begin(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops, struct folio **foliop,
		size_t *poffset, u64 *plen)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos;
	u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
	struct folio *folio;
	int status = 0;

	len = min_not_zero(len, *plen);
	*foliop = NULL;
	*plen = 0;

	if (fatal_signal_pending(current))
		return -EINTR;

	folio = __iomap_get_folio(iter, write_ops, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * No folio means we're done with a batch. We still have range to
	 * process so return and let the caller iterate and refill the batch.
	 */
	if (!folio) {
		WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
		return 0;
	}

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (write_ops && write_ops->iomap_valid) {
		bool iomap_valid = write_ops->iomap_valid(iter->inode,
							 &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	/*
	 * The folios in a batch may not be contiguous. If we've skipped
	 * forward, advance the iter to the pos of the current folio. If the
	 * folio starts beyond the end of the mapping, it may have been trimmed
	 * since the lookup for whatever reason. Return a NULL folio to
	 * terminate the op.
	 */
	if (folio_pos(folio) > iter->pos) {
		len = min_t(u64, folio_pos(folio) - iter->pos,
				 iomap_length(iter));
		status = iomap_iter_advance(iter, len);
		len = iomap_length(iter);
		if (status || !len)
			goto out_unlock;
	}

	pos = iomap_trim_folio_range(iter, folio, poffset, &len);

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, write_ops, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	*plen = len;
	return 0;

out_unlock:
	__iomap_put_folio(iter, write_ops, 0, folio);
	return status;
}

static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return false;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return true;
}

static bool iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	if (WARN_ON_ONCE(!iomap->inline_data))
		return false;

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return true;
}

/*
 * Returns true if all copied bytes have been written to the pagecache,
 * otherwise returns false.
 */
static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
		struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;

	if (srcmap->type == IOMAP_INLINE)
		return iomap_write_end_inline(iter, folio, pos, copied);

	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		size_t bh_written;

		bh_written = block_write_end(pos, len, copied, folio);
		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
		return bh_written == copied;
	}

	return __iomap_write_end(iter->inode, pos, len, copied, folio);
}

static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
		const struct iomap_write_ops *write_ops)
{
	ssize_t total_written = 0;
	int status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	size_t chunk = mapping_max_folio_size(mapping);
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		loff_t old_size;
		size_t offset;		/* Offset into folio */
		u64 bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */
		u64 written;		/* Bytes that have been written */
		loff_t pos;

		bytes = iov_iter_count(i);
retry:
		offset = iter->pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > iomap_length(iter))
			bytes = iomap_length(iter);

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, write_ops, &folio, &offset,
				&bytes);
		if (unlikely(status)) {
			iomap_write_failed(iter->inode, iter->pos, bytes);
			break;
		}
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		pos = iter->pos;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		written = iomap_write_end(iter, bytes, copied, folio) ?
			  copied : 0;

		/*
		 * Update the in-memory inode size after copying the data into
		 * the page cache.  It's up to the file system to write the
		 * updated size to disk, preferably after I/O completion so that
		 * no stale data is exposed.  Only once that's done can we
		 * unlock and release the folio.
		 */
		old_size = iter->inode->i_size;
		if (pos + written > old_size) {
			i_size_write(iter->inode, pos + written);
			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
		}
		__iomap_put_folio(iter, write_ops, written, folio);

		if (old_size < pos)
			pagecache_isize_extended(iter->inode, old_size, pos);

		cond_resched();
		if (unlikely(written == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			iomap_write_failed(iter->inode, pos, bytes);
			iov_iter_revert(i, copied);

			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			total_written += written;
			iomap_iter_advance(iter, written);
		}
	} while (iov_iter_count(i) && iomap_length(iter));

	return total_written ? 0 : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
		.private	= private,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;
	if (iocb->ki_flags & IOCB_DONTCACHE)
		iter.flags |= IOMAP_DONTCACHE;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_write_iter(&iter, i, write_ops);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
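
/*
 * Usage sketch (hypothetical, for orientation only): a filesystem's
 * ->write_iter typically takes the inode lock, runs the generic write
 * checks and then hands off to iomap_file_buffered_write() with its own
 * iomap_ops (myfs_buffered_write_iomap_ops is a stand-in name):
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_buffered_write_iomap_ops,
 *					NULL, NULL);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */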

static void iomap_write_delalloc_ifs_punch(struct inode *inode,
		struct folio *folio, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	unsigned int first_blk, last_blk;
	loff_t last_byte;
	u8 blkbits = inode->i_blkbits;
	struct iomap_folio_state *ifs;

	/*
	 * When we have per-block dirty tracking, there can be
	 * blocks within a folio which are marked uptodate
	 * but not dirty. In that case it is necessary to punch
	 * out such blocks to avoid leaking any delalloc blocks.
	 */
	ifs = folio->private;
	if (!ifs)
		return;

	last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
	while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
		       <= last_blk) {
		punch(inode, folio_pos(folio) + (first_blk << blkbits),
				1 << blkbits, iomap);
		first_blk++;
	}
}

static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	if (!folio_test_dirty(folio))
		return;

	/* if dirty, punch up to offset */
	if (start_byte > *punch_start_byte) {
		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
				iomap);
	}

	/* Punch non-dirty blocks within folio */
	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
			iomap, punch);

	/*
	 * Make sure the next punch start is correctly bound to
	 * the end of this data range, not the end of the folio.
	 */
	*punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
}

/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start from.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they were read in via read faults, in which
 * case they contain zeroes and we can remove the delalloc backing range and
 * any new writes to those pages will do the normal hole filling operation...
 *
 * This makes the logic simple: we only need to keep the delalloc extents over
 * the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static void iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	while (start_byte < end_byte) {
		struct folio	*folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
				start_byte, end_byte, iomap, punch);

		/* move offset to start of next folio in range */
		start_byte = folio_next_pos(folio);
		folio_unlock(folio);
		folio_put(folio);
	}
}

/*
 * When a short write occurs, the filesystem might need to use ->iomap_end
 * to remove space reservations created in ->iomap_begin.
 *
 * For filesystems that use delayed allocation, there can be dirty pages over
 * the delalloc extent outside the range of a short write but still within the
 * delalloc extent allocated for this iomap if the write raced with page
 * faults.
 *
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it. It must skip over all other types of extents in the range and leave
 * them completely unchanged. It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes. This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date. A write page fault can then mark it dirty. If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out. Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end). Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
		loff_t end_byte, unsigned flags, struct iomap *iomap,
		iomap_punch_t punch)
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);

	/*
	 * The caller must hold invalidate_lock to avoid races with page faults
	 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
	 * we walk the cache and perform delalloc extent removal.  Failing to do
	 * this can leave dirty pages with no space reservation in the cache.
	 */
	lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);

	while (start_byte < scan_end_byte) {
		loff_t		data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 *
		 * Note that mapping_seek_hole_data is only supposed to return
		 * either an offset or -ENXIO, so WARN on any other error as
		 * that would be an API change without updating the callers.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (WARN_ON_ONCE(start_byte < 0))
			return;
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (WARN_ON_ONCE(data_end < 0))
			return;

		/*
		 * If we race with post-direct I/O invalidation of the page cache,
		 * there might be no data left at start_byte.
		 */
		if (data_end == start_byte)
			continue;

		WARN_ON_ONCE(data_end < start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
				data_end, iomap, punch);

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		punch(inode, punch_start_byte, end_byte - punch_start_byte,
				iomap);
}
EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
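
/*
 * Worked example (illustrative): releasing [0, 64k) where the page cache
 * holds data over [16k, 32k) (a dirty folio) and [48k, 56k) (a clean
 * folio), with i_size >= 64k:
 *
 *	SEEK_DATA(0)   -> 16k, SEEK_HOLE(16k) -> 32k: the folio is dirty,
 *	                  so punch [0, 16k) and move punch_start_byte to 32k
 *	SEEK_DATA(32k) -> 48k, SEEK_HOLE(48k) -> 56k: the folio is clean,
 *	                  so nothing is punched and punch_start_byte stays
 *	SEEK_DATA(56k) -> -ENXIO, the scan loop ends
 *	final punch    -> [32k, 64k)
 *
 * i.e. delalloc blocks survive only under the dirty folio at [16k, 32k);
 * the clean cached range at [48k, 56k) loses its backing store as well.
 */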

static int iomap_unshare_iter(struct iomap_iter *iter,
		const struct iomap_write_ops *write_ops)
{
	struct iomap *iomap = &iter->iomap;
	u64 bytes = iomap_length(iter);
	int status;

	if (!iomap_want_unshare_iter(iter))
		return iomap_iter_advance(iter, bytes);

	do {
		struct folio *folio;
		size_t offset;
		bool ret;

		bytes = min_t(u64, SIZE_MAX, bytes);
		status = iomap_write_begin(iter, write_ops, &folio, &offset,
				&bytes);
		if (unlikely(status))
			return status;
		if (iomap->flags & IOMAP_F_STALE)
			break;

		ret = iomap_write_end(iter, bytes, bytes, folio);
		__iomap_put_folio(iter, write_ops, bytes, folio);
		if (WARN_ON_ONCE(!ret))
			return -EIO;

		cond_resched();

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);

		status = iomap_iter_advance(iter, bytes);
		if (status)
			break;
	} while ((bytes = iomap_length(iter)) > 0);

	return status;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	loff_t size = i_size_read(inode);
	int ret;

	if (pos < 0 || pos >= size)
		return 0;

	iter.len = min(len, size - pos);
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_unshare_iter(&iter, write_ops);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);

/*
 * Flush the remaining range of the iter and mark the current mapping stale.
 * This is used when zero range sees an unwritten mapping that may have had
 * dirty pagecache over it.
 */
static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
{
	struct address_space *mapping = i->inode->i_mapping;
	loff_t end = i->pos + i->len - 1;

	i->iomap.flags |= IOMAP_F_STALE;
	return filemap_write_and_wait_range(mapping, i->pos, end);
}

static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
		const struct iomap_write_ops *write_ops)
{
	u64 bytes = iomap_length(iter);
	int status;

	do {
		struct folio *folio;
		size_t offset;
		bool ret;

		bytes = min_t(u64, SIZE_MAX, bytes);
		status = iomap_write_begin(iter, write_ops, &folio, &offset,
				&bytes);
		if (status)
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		/* a NULL folio means we're done with a folio batch */
		if (!folio) {
			status = iomap_iter_advance_full(iter);
			break;
		}

		/* warn about zeroing folios beyond eof that won't write back */
		WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);

		trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
				bytes);

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		ret = iomap_write_end(iter, bytes, bytes, folio);
		__iomap_put_folio(iter, write_ops, bytes, folio);
		if (WARN_ON_ONCE(!ret))
			return -EIO;

		status = iomap_iter_advance(iter, bytes);
		if (status)
			break;
	} while ((bytes = iomap_length(iter)) > 0);

	if (did_zero)
		*did_zero = true;
	return status;
}

/**
 * iomap_fill_dirty_folios - fill a folio batch with dirty folios
 * @iter: Iteration structure
 * @start: Start offset of range. Updated based on lookup progress.
 * @end: End offset of range
 * @iomap_flags: Flags to set on the associated iomap to track the batch.
 *
 * Returns the folio count directly. Also returns the associated control flag
 * if the batch lookup is performed, and the expected offset of a subsequent
 * lookup, via out params. The caller is responsible for setting the flag on
 * the associated iomap.
 */
unsigned int
iomap_fill_dirty_folios(
	struct iomap_iter	*iter,
	loff_t			*start,
	loff_t			end,
	unsigned int		*iomap_flags)
{
	struct address_space	*mapping = iter->inode->i_mapping;
	pgoff_t			pstart = *start >> PAGE_SHIFT;
	pgoff_t			pend = (end - 1) >> PAGE_SHIFT;
	unsigned int		count;

	if (!iter->fbatch) {
		*start = end;
		return 0;
	}

	count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
	*start = (loff_t)pstart << PAGE_SHIFT;
	*iomap_flags |= IOMAP_F_FOLIO_BATCH;
	return count;
}
EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private)
{
	struct folio_batch fbatch;
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
		.private	= private,
		.fbatch		= &fbatch,
	};
	struct address_space *mapping = inode->i_mapping;
	int ret;
	bool range_dirty;

	folio_batch_init(&fbatch);

	/*
	 * To avoid an unconditional flush, check pagecache state and only flush
	 * if dirty and the fs returns a mapping that might convert on
	 * writeback.
	 */
	range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
					iter.pos + iter.len - 1);
	while ((ret = iomap_iter(&iter, ops)) > 0) {
		const struct iomap *srcmap = iomap_iter_srcmap(&iter);

		if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
				 srcmap->type != IOMAP_UNWRITTEN))
			return -EIO;

		if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
		    (srcmap->type == IOMAP_HOLE ||
		     srcmap->type == IOMAP_UNWRITTEN)) {
			s64 status;

			if (range_dirty) {
				range_dirty = false;
				status = iomap_zero_iter_flush_and_stale(&iter);
			} else {
				status = iomap_iter_advance_full(&iter);
			}
			iter.status = status;
			continue;
		}

		iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops,
		const struct iomap_write_ops *write_ops, void *private)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
			write_ops, private);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
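
/*
 * Worked example (illustrative): with a 4k block size and pos = 6144,
 * off = 6144 & 4095 = 2048, so the call zeroes the 2048 bytes from 6144
 * up to the block boundary at 8192.  A block-aligned pos (e.g. 8192) has
 * off == 0 and returns immediately as no partial block is exposed.
 */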
1675 
1676 static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1677 		struct folio *folio)
1678 {
1679 	loff_t length = iomap_length(iter);
1680 	int ret;
1681 
1682 	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1683 		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1684 					      &iter->iomap);
1685 		if (ret)
1686 			return ret;
1687 		block_commit_write(folio, 0, length);
1688 	} else {
1689 		WARN_ON_ONCE(!folio_test_uptodate(folio));
1690 		folio_mark_dirty(folio);
1691 	}
1692 
1693 	return iomap_iter_advance(iter, length);
1694 }
1695 
1696 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
1697 		void *private)
1698 {
1699 	struct iomap_iter iter = {
1700 		.inode		= file_inode(vmf->vma->vm_file),
1701 		.flags		= IOMAP_WRITE | IOMAP_FAULT,
1702 		.private	= private,
1703 	};
1704 	struct folio *folio = page_folio(vmf->page);
1705 	ssize_t ret;
1706 
1707 	folio_lock(folio);
1708 	ret = folio_mkwrite_check_truncate(folio, iter.inode);
1709 	if (ret < 0)
1710 		goto out_unlock;
1711 	iter.pos = folio_pos(folio);
1712 	iter.len = ret;
1713 	while ((ret = iomap_iter(&iter, ops)) > 0)
1714 		iter.status = iomap_folio_mkwrite_iter(&iter, folio);
1715 
1716 	if (ret < 0)
1717 		goto out_unlock;
1718 	folio_wait_stable(folio);
1719 	return VM_FAULT_LOCKED;
1720 out_unlock:
1721 	folio_unlock(folio);
1722 	return vmf_fs_error(ret);
1723 }
1724 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
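/*
 * Example (hypothetical): a filesystem typically wires this up as its
 * ->page_mkwrite handler.  "myfs_iomap_ops" is a placeholder:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops, NULL);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */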
1725 
1726 static void iomap_writeback_init(struct inode *inode, struct folio *folio)
1727 {
1728 	struct iomap_folio_state *ifs = folio->private;
1729 
1730 	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1731 	if (ifs) {
1732 		WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
1733 		/*
1734 		 * Set this to the folio size. After processing the folio for
1735 		 * writeback in iomap_writeback_folio(), we'll subtract any
1736 		 * ranges not written back.
1737 		 *
1738 		 * We do this because otherwise, we would have to atomically
1739 		 * increment ifs->write_bytes_pending every time a range in the
1740 		 * folio needs to be written back.
1741 		 */
1742 		atomic_set(&ifs->write_bytes_pending, folio_size(folio));
1743 	}
1744 }
1745 
1746 void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1747 		size_t len)
1748 {
1749 	struct iomap_folio_state *ifs = folio->private;
1750 
1751 	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1752 	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1753 
1754 	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1755 		folio_end_writeback(folio);
1756 }
1757 EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
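/*
 * Worked example of the write_bytes_pending accounting: for a 16K folio,
 * iomap_writeback_init() sets the counter to 16384.  If only the first 4K
 * is dirty and submitted, iomap_writeback_folio() subtracts the 12288
 * bytes that were never submitted, and the bio completion then calls
 * iomap_finish_folio_write() for the remaining 4096; the counter hits
 * zero and folio_end_writeback() runs exactly once.
 */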
1758 
1759 static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
1760 		struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
1761 		size_t *bytes_submitted)
1762 {
1763 	do {
1764 		ssize_t ret;
1765 
1766 		ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
1767 		if (WARN_ON_ONCE(ret == 0 || ret > rlen))
1768 			return -EIO;
1769 		if (ret < 0)
1770 			return ret;
1771 		rlen -= ret;
1772 		pos += ret;
1773 
1774 		/*
1775 		 * Holes are not written back by ->writeback_range, so track
1776 		 * if we did handle anything that is not a hole here.
1777 		 * whether we handled anything here that is not a hole.
1778 		if (wpc->iomap.type != IOMAP_HOLE)
1779 			*bytes_submitted += ret;
1780 	} while (rlen);
1781 
1782 	return 0;
1783 }
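/*
 * Example (hypothetical, heavily simplified): a ->writeback_range
 * implementation maps the range and hands anything that is not a hole to
 * the ioend machinery.  "myfs_map_blocks" is an invented helper, and the
 * sketch assumes the filesystem uses the library's iomap_add_to_ioend()
 * helper; real implementations also revalidate cached mappings:
 *
 *	static ssize_t myfs_writeback_range(struct iomap_writepage_ctx *wpc,
 *			struct folio *folio, u64 pos, unsigned int len,
 *			u64 end_pos)
 *	{
 *		int error = myfs_map_blocks(wpc, pos, &wpc->iomap);
 *
 *		if (error)
 *			return error;
 *		if (wpc->iomap.type == IOMAP_HOLE)
 *			return len;	/* nothing to submit for holes */
 *		return iomap_add_to_ioend(wpc, folio, pos, end_pos, len);
 *	}
 */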
1784 
1785 /*
1786  * Check interaction of the folio with the file end.
1787  *
1788  * If the folio is entirely beyond i_size, return false.  If it straddles
1789  * i_size, adjust end_pos and zero all data beyond i_size.
1790  */
1791 static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
1792 		u64 *end_pos)
1793 {
1794 	u64 isize = i_size_read(inode);
1795 
1796 	if (*end_pos > isize) {
1797 		size_t poff = offset_in_folio(folio, isize);
1798 		pgoff_t end_index = isize >> PAGE_SHIFT;
1799 
1800 		/*
1801 		 * If the folio is entirely outside of i_size, skip it.
1802 		 *
1803 		 * This can happen due to a truncate operation that is in
1804 		 * progress and in that case truncate will finish it off once
1805 		 * we've dropped the folio lock.
1806 		 *
1807 		 * Note that the pgoff_t used for end_index is an unsigned long.
1808 		 * If the given offset is greater than 16TB on a 32-bit system,
1809 		 * then if we checked if the folio is fully outside i_size with
1810 		 * "if (folio->index >= end_index + 1)", "end_index + 1" would
1811 		 * overflow and evaluate to 0.  Hence this folio would be
1812 		 * redirtied and written out repeatedly, which would result in
1813 		 * an infinite loop; the user program performing this operation
1814 		 * would hang.  Instead, we can detect this situation by
1815 		 * checking if the folio is totally beyond i_size or if its
1816 		 * offset is just equal to the EOF.
1817 		 */
1818 		if (folio->index > end_index ||
1819 		    (folio->index == end_index && poff == 0))
1820 			return false;
1821 
1822 		/*
1823 		 * The folio straddles i_size.
1824 		 *
1825 		 * It must be zeroed out on each and every writepage invocation
1826 		 * because it may be mmapped:
1827 		 *
1828 		 *    A file is mapped in multiples of the page size.  For a
1829 		 *    file that is not a multiple of the page size, the
1830 		 *    remaining memory is zeroed when mapped, and writes to that
1831 		 *    region are not written out to the file.
1832 		 *
1833 		 * Also adjust the end_pos to the end of file and skip writeback
1834 		 * for all blocks entirely beyond i_size.
1835 		 */
1836 		folio_zero_segment(folio, poff, folio_size(folio));
1837 		*end_pos = isize;
1838 	}
1839 
1840 	return true;
1841 }
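/*
 * Worked example of the overflow discussed above: on a 32-bit system with
 * 4K pages, an i_size just below 16TB yields end_index == 0xffffffff, the
 * maximum pgoff_t.  A check written as "folio->index >= end_index + 1"
 * would compute 0xffffffff + 1 == 0 and never trigger, which is why the
 * comparison above avoids the "+ 1".
 */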
1842 
1843 int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
1844 {
1845 	struct iomap_folio_state *ifs = folio->private;
1846 	struct inode *inode = wpc->inode;
1847 	u64 pos = folio_pos(folio);
1848 	u64 end_pos = pos + folio_size(folio);
1849 	u64 end_aligned = 0;
1850 	loff_t orig_pos = pos;
1851 	size_t bytes_submitted = 0;
1852 	int error = 0;
1853 	u32 rlen;
1854 
1855 	WARN_ON_ONCE(!folio_test_locked(folio));
1856 	WARN_ON_ONCE(folio_test_dirty(folio));
1857 	WARN_ON_ONCE(folio_test_writeback(folio));
1858 
1859 	trace_iomap_writeback_folio(inode, pos, folio_size(folio));
1860 
1861 	if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
1862 		return 0;
1863 	WARN_ON_ONCE(end_pos <= pos);
1864 
1865 	if (i_blocks_per_folio(inode, folio) > 1) {
1866 		if (!ifs) {
1867 			ifs = ifs_alloc(inode, folio, 0);
1868 			iomap_set_range_dirty(folio, 0, end_pos - pos);
1869 		}
1870 
1871 		iomap_writeback_init(inode, folio);
1872 	}
1873 
1874 	/*
1875 	 * Set the writeback bit ASAP, as the I/O completion for the single
1876 	 * block per folio case can hit as soon as we submit the bio.
1877 	 */
1878 	folio_start_writeback(folio);
1879 
1880 	/*
1881 	 * Walk through the folio to find dirty areas to write back.
1882 	 */
1883 	end_aligned = round_up(end_pos, i_blocksize(inode));
1884 	while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
1885 		error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
1886 				&bytes_submitted);
1887 		if (error)
1888 			break;
1889 		pos += rlen;
1890 	}
1891 
1892 	if (bytes_submitted)
1893 		wpc->nr_folios++;
1894 	if (error && pos > orig_pos)
1895 		fserror_report_io(inode, FSERR_BUFFERED_WRITE, orig_pos, 0,
1896 				  error, GFP_NOFS);
1897 
1898 	/*
1899 	 * We can have dirty bits set past the end of file in the page_mkwrite
1900 	 * path while mapping the last partial folio. Hence it's better to clear
1901 	 * all the dirty bits in the folio here.
1902 	 */
1903 	iomap_clear_range_dirty(folio, 0, folio_size(folio));
1904 
1905 	/*
1906 	 * Usually the writeback bit is cleared by the I/O completion handler.
1907 	 * But we may end up either not actually writing any blocks, or (when
1908 	 * there are multiple blocks in a folio) all I/O might have finished
1909 	 * already at this point.  In that case we need to clear the writeback
1910 	 * bit ourselves right after unlocking the page.
1911 	 */
1912 	if (ifs) {
1913 		/*
1914 		 * Subtract any bytes that were initially accounted to
1915 		 * write_bytes_pending but skipped for writeback.
1916 		 */
1917 		size_t bytes_not_submitted = folio_size(folio) -
1918 				bytes_submitted;
1919 
1920 		if (bytes_not_submitted)
1921 			iomap_finish_folio_write(inode, folio,
1922 					bytes_not_submitted);
1923 	} else if (!bytes_submitted) {
1924 		folio_end_writeback(folio);
1925 	}
1926 
1927 	mapping_set_error(inode->i_mapping, error);
1928 	return error;
1929 }
1930 EXPORT_SYMBOL_GPL(iomap_writeback_folio);
1931 
1932 int
1933 iomap_writepages(struct iomap_writepage_ctx *wpc)
1934 {
1935 	struct address_space *mapping = wpc->inode->i_mapping;
1936 	struct folio *folio = NULL;
1937 	int error;
1938 
1939 	/*
1940 	 * Writeback from reclaim context should never happen except in the case
1941 	 * of a VM regression, so warn about it and refuse to write the data.
1942 	 */
1943 	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
1944 			PF_MEMALLOC))
1945 		return -EIO;
1946 
1947 	while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
1948 		error = iomap_writeback_folio(wpc, folio);
1949 		folio_unlock(folio);
1950 	}
1951 
1952 	/*
1953 	 * If @error is non-zero, it means that we have a situation where some
1954 	 * part of the submission process has failed after we've marked pages
1955 	 * for writeback.
1956 	 *
1957 	 * We cannot cancel the writeback directly in that case, so always call
1958 	 * ->writeback_submit to run the I/O completion handler to clear the
1959 	 * writeback bit and let the file system process the errors.
1960 	 */
1961 	if (wpc->wb_ctx)
1962 		return wpc->ops->writeback_submit(wpc, error);
1963 	return error;
1964 }
1965 EXPORT_SYMBOL_GPL(iomap_writepages);
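/*
 * Example (hypothetical): a filesystem's ->writepages typically stacks an
 * iomap_writepage_ctx and calls iomap_writepages().  "myfs_writeback_ops"
 * stands in for the filesystem's iomap_writeback_ops:
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = {
 *			.inode	= mapping->host,
 *			.wbc	= wbc,
 *			.ops	= &myfs_writeback_ops,
 *		};
 *
 *		return iomap_writepages(&wpc);
 *	}
 */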
1966