xref: /linux/fs/iomap/buffered-io.c (revision 0b3bb205808195159be633a8cefb602670e856fb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Red Hat, Inc.
4  * Copyright (C) 2016-2023 Christoph Hellwig.
5  */
6 #include <linux/iomap.h>
7 #include <linux/buffer_head.h>
8 #include <linux/writeback.h>
9 #include <linux/swap.h>
10 #include <linux/migrate.h>
11 #include <linux/fserror.h>
12 #include "internal.h"
13 #include "trace.h"
14 
15 #include "../internal.h"
16 
17 /*
18  * Structure allocated for each folio to track per-block uptodate, dirty state
19  * and I/O completions.
20  */
21 struct iomap_folio_state {
22 	spinlock_t		state_lock;
23 	unsigned int		read_bytes_pending;
24 	atomic_t		write_bytes_pending;
25 
26 	/*
27 	 * Each block has two bits in this bitmap:
28 	 * Bits [0..blocks_per_folio) hold the uptodate status.
29 	 * Bits [b_p_f...(2*b_p_f))   hold the dirty status.
30 	 */
31 	unsigned long		state[];
32 };
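/*
 * Worked example (hypothetical values, not part of the kernel source):
 * with a 16KiB folio and 4KiB filesystem blocks, i_blocks_per_folio()
 * returns 4, so ifs->state uses bits 0-3 for per-block uptodate state
 * and bits 4-7 for per-block dirty state.  A value of state[0] == 0x31
 * would mean block 0 is uptodate and blocks 0 and 1 are dirty.
 */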
33 
34 static inline bool ifs_is_fully_uptodate(struct folio *folio,
35 		struct iomap_folio_state *ifs)
36 {
37 	struct inode *inode = folio->mapping->host;
38 
39 	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
40 }
41 
42 /*
43  * Find the next uptodate block in the folio. end_blk is inclusive.
44  * If no uptodate block is found, this will return end_blk + 1.
45  */
46 static unsigned ifs_next_uptodate_block(struct folio *folio,
47 		unsigned start_blk, unsigned end_blk)
48 {
49 	struct iomap_folio_state *ifs = folio->private;
50 
51 	return find_next_bit(ifs->state, end_blk + 1, start_blk);
52 }
53 
54 /*
55  * Find the next non-uptodate block in the folio. end_blk is inclusive.
56  * If no non-uptodate block is found, this will return end_blk + 1.
57  */
58 static unsigned ifs_next_nonuptodate_block(struct folio *folio,
59 		unsigned start_blk, unsigned end_blk)
60 {
61 	struct iomap_folio_state *ifs = folio->private;
62 
63 	return find_next_zero_bit(ifs->state, end_blk + 1, start_blk);
64 }
65 
66 static bool ifs_set_range_uptodate(struct folio *folio,
67 		struct iomap_folio_state *ifs, size_t off, size_t len)
68 {
69 	struct inode *inode = folio->mapping->host;
70 	unsigned int first_blk = off >> inode->i_blkbits;
71 	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
72 	unsigned int nr_blks = last_blk - first_blk + 1;
73 
74 	bitmap_set(ifs->state, first_blk, nr_blks);
75 	return ifs_is_fully_uptodate(folio, ifs);
76 }
77 
78 static void iomap_set_range_uptodate(struct folio *folio, size_t off,
79 		size_t len)
80 {
81 	struct iomap_folio_state *ifs = folio->private;
82 	unsigned long flags;
83 	bool mark_uptodate = true;
84 
85 	if (folio_test_uptodate(folio))
86 		return;
87 
88 	if (ifs) {
89 		spin_lock_irqsave(&ifs->state_lock, flags);
90 		/*
91 		 * If a read with bytes pending is in progress, we must not call
92 		 * folio_mark_uptodate(). The read completion path
93 		 * (iomap_read_end()) will call folio_end_read(), which uses XOR
94 		 * semantics to set the uptodate bit. If we set it here, the XOR
95 		 * in folio_end_read() will clear it, leaving the folio not
96 		 * uptodate.
97 		 */
98 		mark_uptodate = ifs_set_range_uptodate(folio, ifs, off, len) &&
99 				!ifs->read_bytes_pending;
100 		spin_unlock_irqrestore(&ifs->state_lock, flags);
101 	}
102 
103 	if (mark_uptodate)
104 		folio_mark_uptodate(folio);
105 }
106 
107 /*
108  * Find the next dirty block in the folio. end_blk is inclusive.
109  * If no dirty block is found, this will return end_blk + 1.
110  */
111 static unsigned ifs_next_dirty_block(struct folio *folio,
112 		unsigned start_blk, unsigned end_blk)
113 {
114 	struct iomap_folio_state *ifs = folio->private;
115 	struct inode *inode = folio->mapping->host;
116 	unsigned int blks = i_blocks_per_folio(inode, folio);
117 
118 	return find_next_bit(ifs->state, blks + end_blk + 1,
119 			blks + start_blk) - blks;
120 }
121 
122 /*
123  * Find the next clean block in the folio. end_blk is inclusive.
124  * If no clean block is found, this will return end_blk + 1.
125  */
126 static unsigned ifs_next_clean_block(struct folio *folio,
127 		unsigned start_blk, unsigned end_blk)
128 {
129 	struct iomap_folio_state *ifs = folio->private;
130 	struct inode *inode = folio->mapping->host;
131 	unsigned int blks = i_blocks_per_folio(inode, folio);
132 
133 	return find_next_zero_bit(ifs->state, blks + end_blk + 1,
134 			blks + start_blk) - blks;
135 }
136 
137 static unsigned ifs_find_dirty_range(struct folio *folio,
138 		struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
139 {
140 	struct inode *inode = folio->mapping->host;
141 	unsigned start_blk =
142 		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
143 	unsigned end_blk = min_not_zero(
144 		offset_in_folio(folio, range_end) >> inode->i_blkbits,
145 		i_blocks_per_folio(inode, folio)) - 1;
146 	unsigned nblks;
147 
148 	start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
149 	if (start_blk > end_blk)
150 		return 0;
151 	if (start_blk == end_blk)
152 		nblks = 1;
153 	else
154 		nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
155 				start_blk;
156 
157 	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
158 	return nblks << inode->i_blkbits;
159 }
160 
161 static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
162 		u64 range_end)
163 {
164 	struct iomap_folio_state *ifs = folio->private;
165 
166 	if (*range_start >= range_end)
167 		return 0;
168 
169 	if (ifs)
170 		return ifs_find_dirty_range(folio, ifs, range_start, range_end);
171 	return range_end - *range_start;
172 }
173 
174 static void ifs_clear_range_dirty(struct folio *folio,
175 		struct iomap_folio_state *ifs, size_t off, size_t len)
176 {
177 	struct inode *inode = folio->mapping->host;
178 	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
179 	unsigned int first_blk = (off >> inode->i_blkbits);
180 	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
181 	unsigned int nr_blks = last_blk - first_blk + 1;
182 	unsigned long flags;
183 
184 	spin_lock_irqsave(&ifs->state_lock, flags);
185 	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
186 	spin_unlock_irqrestore(&ifs->state_lock, flags);
187 }
188 
189 static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
190 {
191 	struct iomap_folio_state *ifs = folio->private;
192 
193 	if (ifs)
194 		ifs_clear_range_dirty(folio, ifs, off, len);
195 }
196 
197 static void ifs_set_range_dirty(struct folio *folio,
198 		struct iomap_folio_state *ifs, size_t off, size_t len)
199 {
200 	struct inode *inode = folio->mapping->host;
201 	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
202 	unsigned int first_blk = (off >> inode->i_blkbits);
203 	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
204 	unsigned int nr_blks = last_blk - first_blk + 1;
205 	unsigned long flags;
206 
207 	spin_lock_irqsave(&ifs->state_lock, flags);
208 	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
209 	spin_unlock_irqrestore(&ifs->state_lock, flags);
210 }
211 
212 static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
213 {
214 	struct iomap_folio_state *ifs = folio->private;
215 
216 	if (ifs)
217 		ifs_set_range_dirty(folio, ifs, off, len);
218 }
219 
220 static struct iomap_folio_state *ifs_alloc(struct inode *inode,
221 		struct folio *folio, unsigned int flags)
222 {
223 	struct iomap_folio_state *ifs = folio->private;
224 	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
225 	gfp_t gfp;
226 
227 	if (ifs || nr_blocks <= 1)
228 		return ifs;
229 
230 	if (flags & IOMAP_NOWAIT)
231 		gfp = GFP_NOWAIT;
232 	else
233 		gfp = GFP_NOFS | __GFP_NOFAIL;
234 
235 	/*
236 	 * ifs->state tracks two sets of state flags when the
237 	 * filesystem block size is smaller than the folio size.
238 	 * The first state tracks per-block uptodate and the
239 	 * second tracks per-block dirty state.
240 	 */
241 	ifs = kzalloc_flex(*ifs, state, BITS_TO_LONGS(2 * nr_blocks), gfp);
242 	if (!ifs)
243 		return ifs;
244 
245 	spin_lock_init(&ifs->state_lock);
246 	if (folio_test_uptodate(folio))
247 		bitmap_set(ifs->state, 0, nr_blocks);
248 	if (folio_test_dirty(folio))
249 		bitmap_set(ifs->state, nr_blocks, nr_blocks);
250 	folio_attach_private(folio, ifs);
251 
252 	return ifs;
253 }
254 
255 static void ifs_free(struct folio *folio)
256 {
257 	struct iomap_folio_state *ifs = folio_detach_private(folio);
258 
259 	if (!ifs)
260 		return;
261 	WARN_ON_ONCE(ifs->read_bytes_pending != 0);
262 	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
263 	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
264 			folio_test_uptodate(folio));
265 	kfree(ifs);
266 }
267 
268 /*
269  * Calculate how many bytes to truncate based off the number of blocks to
270  * truncate and the end position to start truncating from.
271  */
272 static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits,
273 		unsigned blocks_truncated)
274 {
275 	unsigned block_size = 1 << block_bits;
276 	unsigned block_offset = end_pos & (block_size - 1);
277 
278 	if (!block_offset)
279 		return blocks_truncated << block_bits;
280 
281 	return ((blocks_truncated - 1) << block_bits) + block_offset;
282 }
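/*
 * Worked example (hypothetical numbers): with 1KiB blocks
 * (block_bits == 10), end_pos == 5632 ends 512 bytes into a block, so
 * block_offset == 512.  Truncating blocks_truncated == 2 blocks from
 * that position then covers one full block plus the 512-byte partial
 * block: ((2 - 1) << 10) + 512 == 1536 bytes.
 */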
283 
284 /*
285  * Calculate the range inside the folio that we actually need to read.
286  */
287 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
288 		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
289 {
290 	struct iomap_folio_state *ifs = folio->private;
291 	loff_t orig_pos = *pos;
292 	loff_t isize = i_size_read(inode);
293 	unsigned block_bits = inode->i_blkbits;
294 	unsigned block_size = (1 << block_bits);
295 	size_t poff = offset_in_folio(folio, *pos);
296 	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
297 	size_t orig_plen = plen;
298 	unsigned first = poff >> block_bits;
299 	unsigned last = (poff + plen - 1) >> block_bits;
300 
301 	/*
302 	 * If the block size is smaller than the page size, we need to check the
303 	 * per-block uptodate status and adjust the offset and length if needed
304 	 * to avoid reading in already uptodate ranges.
305 	 */
306 	if (ifs) {
307 		unsigned int next, blocks_skipped;
308 
309 		next = ifs_next_nonuptodate_block(folio, first, last);
310 		blocks_skipped = next - first;
311 
312 		if (blocks_skipped) {
313 			unsigned long block_offset = *pos & (block_size - 1);
314 			unsigned bytes_skipped =
315 				(blocks_skipped << block_bits) - block_offset;
316 
317 			*pos += bytes_skipped;
318 			poff += bytes_skipped;
319 			plen -= bytes_skipped;
320 		}
321 		first = next;
322 
323 		/* truncate len if we find any trailing uptodate block(s) */
324 		if (++next <= last) {
325 			next = ifs_next_uptodate_block(folio, next, last);
326 			if (next <= last) {
327 				plen -= iomap_bytes_to_truncate(*pos + plen,
328 						block_bits, last - next + 1);
329 				last = next - 1;
330 			}
331 		}
332 	}
333 
334 	/*
335 	 * If the extent spans the block that contains the i_size, we need to
336 	 * handle both halves separately so that we properly zero data in the
337 	 * page cache for blocks that are entirely outside of i_size.
338 	 */
339 	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
340 		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
341 
342 		if (first <= end && last > end)
343 			plen -= iomap_bytes_to_truncate(*pos + plen, block_bits,
344 					last - end);
345 	}
346 
347 	*offp = poff;
348 	*lenp = plen;
349 }
350 
351 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
352 		loff_t pos)
353 {
354 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
355 
356 	return srcmap->type != IOMAP_MAPPED ||
357 		(srcmap->flags & IOMAP_F_NEW) ||
358 		pos >= i_size_read(iter->inode);
359 }
360 
361 /**
362  * iomap_read_inline_data - copy inline data into the page cache
363  * @iter: iteration structure
364  * @folio: folio to copy to
365  *
366  * Copy the inline data in @iter into @folio and zero out the rest of the folio.
367  * Only a single IOMAP_INLINE extent is allowed at the end of each file.
368  * Returns zero for success to complete the read, or the usual negative errno.
369  */
370 static int iomap_read_inline_data(const struct iomap_iter *iter,
371 		struct folio *folio)
372 {
373 	const struct iomap *iomap = iomap_iter_srcmap(iter);
374 	size_t size = i_size_read(iter->inode) - iomap->offset;
375 	size_t offset = offset_in_folio(folio, iomap->offset);
376 
377 	if (WARN_ON_ONCE(!iomap->inline_data))
378 		return -EIO;
379 
380 	if (folio_test_uptodate(folio))
381 		return 0;
382 
383 	if (WARN_ON_ONCE(size > iomap->length)) {
384 		fserror_report_io(iter->inode, FSERR_BUFFERED_READ,
385 				  iomap->offset, size, -EIO, GFP_NOFS);
386 		return -EIO;
387 	}
388 	if (offset > 0)
389 		ifs_alloc(iter->inode, folio, iter->flags);
390 
391 	folio_fill_tail(folio, offset, iomap->inline_data, size);
392 	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
393 	return 0;
394 }
395 
396 void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
397 		int error)
398 {
399 	struct iomap_folio_state *ifs = folio->private;
400 	bool uptodate = !error;
401 	bool finished = true;
402 
403 	if (ifs) {
404 		unsigned long flags;
405 
406 		spin_lock_irqsave(&ifs->state_lock, flags);
407 		if (!error)
408 			uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
409 		ifs->read_bytes_pending -= len;
410 		finished = !ifs->read_bytes_pending;
411 		spin_unlock_irqrestore(&ifs->state_lock, flags);
412 	}
413 
414 	if (error)
415 		fserror_report_io(folio->mapping->host, FSERR_BUFFERED_READ,
416 				  folio_pos(folio) + off, len, error,
417 				  GFP_ATOMIC);
418 
419 	if (finished)
420 		folio_end_read(folio, uptodate);
421 }
422 EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
423 
424 static void iomap_read_init(struct folio *folio)
425 {
426 	struct iomap_folio_state *ifs = folio->private;
427 
428 	if (ifs) {
429 		/*
430 		 * ifs->read_bytes_pending is used to track how many bytes are
431 		 * read in asynchronously by the IO helper. We need to track
432 		 * this so that we can know when the IO helper has finished
433 		 * reading in all the necessary ranges of the folio and can end
434 		 * the read.
435 		 *
436 		 * Increase ->read_bytes_pending by the folio size to start.
437 		 * We'll subtract any uptodate / zeroed ranges that did not
438 		 * require IO in iomap_read_end() after we're done processing
439 		 * the folio.
440 		 *
441 		 * We do this because otherwise, we would have to increment
442 		 * ifs->read_bytes_pending every time a range in the folio needs
443 		 * to be read in, which can get expensive since the spinlock
444 		 * needs to be held whenever modifying ifs->read_bytes_pending.
445 		 */
446 		spin_lock_irq(&ifs->state_lock);
447 		WARN_ON_ONCE(ifs->read_bytes_pending != 0);
448 		ifs->read_bytes_pending = folio_size(folio);
449 		spin_unlock_irq(&ifs->state_lock);
450 	}
451 }
452 
453 /*
454  * This ends IO if no bytes were submitted to an IO helper.
455  *
456  * Otherwise, this calibrates ifs->read_bytes_pending to represent only the
457  * submitted bytes (see comment in iomap_read_init()). If all bytes submitted
458  * have already been completed by the IO helper, then this will end the read.
459  * Else the IO helper will end the read after all submitted ranges have been
460  * read.
461  */
462 static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
463 {
464 	struct iomap_folio_state *ifs = folio->private;
465 
466 	if (ifs) {
467 		bool end_read, uptodate;
468 
469 		spin_lock_irq(&ifs->state_lock);
470 		if (!ifs->read_bytes_pending) {
471 			WARN_ON_ONCE(bytes_submitted);
472 			spin_unlock_irq(&ifs->state_lock);
473 			folio_unlock(folio);
474 			return;
475 		}
476 
477 		/*
478 		 * Subtract any bytes that were initially accounted to
479 		 * read_bytes_pending but skipped for IO.
480 		 */
481 		ifs->read_bytes_pending -= folio_size(folio) - bytes_submitted;
482 
483 		/*
484 		 * If !ifs->read_bytes_pending, this means all pending reads by
485 		 * the IO helper have already completed, which means we need to
486 		 * end the folio read here. If ifs->read_bytes_pending != 0,
487 		 * the IO helper will end the folio read.
488 		 */
489 		end_read = !ifs->read_bytes_pending;
490 		if (end_read)
491 			uptodate = ifs_is_fully_uptodate(folio, ifs);
492 		spin_unlock_irq(&ifs->state_lock);
493 		if (end_read)
494 			folio_end_read(folio, uptodate);
495 	} else {
496 		/*
497 		 * If a folio without an ifs is submitted to the IO helper, the
498 		 * read must be on the entire folio and the IO helper takes
499 		 * ownership of the folio. This means we should only enter
500 		 * iomap_read_end() for the !ifs case if no bytes were submitted
501 		 * to the IO helper, in which case we are responsible for
502 		 * unlocking the folio here.
503 		 */
504 		WARN_ON_ONCE(bytes_submitted);
505 		folio_unlock(folio);
506 	}
507 }
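/*
 * Accounting sketch (hypothetical numbers): for a 16KiB folio where
 * only one 4KiB block needs I/O, iomap_read_init() sets
 * read_bytes_pending to 16384.  iomap_read_end() then subtracts the
 * 12288 bytes that were satisfied without I/O, leaving 4096 pending.
 * Once iomap_finish_folio_read() completes those 4096 bytes the
 * counter reaches zero and folio_end_read() finishes the folio.
 */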
508 
509 static int iomap_read_folio_iter(struct iomap_iter *iter,
510 		struct iomap_read_folio_ctx *ctx, size_t *bytes_submitted)
511 {
512 	const struct iomap *iomap = &iter->iomap;
513 	loff_t pos = iter->pos;
514 	loff_t length = iomap_length(iter);
515 	struct folio *folio = ctx->cur_folio;
516 	size_t folio_len = folio_size(folio);
517 	size_t poff, plen;
518 	loff_t pos_diff;
519 	int ret;
520 
521 	if (iomap->type == IOMAP_INLINE) {
522 		ret = iomap_read_inline_data(iter, folio);
523 		if (ret)
524 			return ret;
525 		return iomap_iter_advance(iter, length);
526 	}
527 
528 	ifs_alloc(iter->inode, folio, iter->flags);
529 
530 	length = min_t(loff_t, length, folio_len - offset_in_folio(folio, pos));
531 	while (length) {
532 		iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
533 				&plen);
534 
535 		pos_diff = pos - iter->pos;
536 		if (WARN_ON_ONCE(pos_diff + plen > length))
537 			return -EIO;
538 
539 		ret = iomap_iter_advance(iter, pos_diff);
540 		if (ret)
541 			return ret;
542 
543 		if (plen == 0)
544 			return 0;
545 
546 		/* zero post-eof blocks as the page may be mapped */
547 		if (iomap_block_needs_zeroing(iter, pos)) {
548 			folio_zero_range(folio, poff, plen);
549 			iomap_set_range_uptodate(folio, poff, plen);
550 		} else {
551 			if (!*bytes_submitted)
552 				iomap_read_init(folio);
553 			ret = ctx->ops->read_folio_range(iter, ctx, plen);
554 			if (ret < 0)
555 				fserror_report_io(iter->inode,
556 						  FSERR_BUFFERED_READ, pos,
557 						  plen, ret, GFP_NOFS);
558 			if (ret)
559 				return ret;
560 
561 			*bytes_submitted += plen;
562 			/*
563 			 * If the entire folio has been read in by the IO
564 			 * helper, then the helper owns the folio and will end
565 			 * the read on it.
566 			 */
567 			if (*bytes_submitted == folio_len)
568 				ctx->cur_folio = NULL;
569 		}
570 
571 		ret = iomap_iter_advance(iter, plen);
572 		if (ret)
573 			return ret;
574 		length -= pos_diff + plen;
575 		pos = iter->pos;
576 	}
577 	return 0;
578 }
579 
580 void iomap_read_folio(const struct iomap_ops *ops,
581 		struct iomap_read_folio_ctx *ctx, void *private)
582 {
583 	struct folio *folio = ctx->cur_folio;
584 	struct iomap_iter iter = {
585 		.inode		= folio->mapping->host,
586 		.pos		= folio_pos(folio),
587 		.len		= folio_size(folio),
588 		.private	= private,
589 	};
590 	size_t bytes_submitted = 0;
591 	int ret;
592 
593 	trace_iomap_readpage(iter.inode, 1);
594 
595 	while ((ret = iomap_iter(&iter, ops)) > 0)
596 		iter.status = iomap_read_folio_iter(&iter, ctx,
597 				&bytes_submitted);
598 
599 	if (ctx->ops->submit_read)
600 		ctx->ops->submit_read(ctx);
601 
602 	if (ctx->cur_folio)
603 		iomap_read_end(ctx->cur_folio, bytes_submitted);
604 }
605 EXPORT_SYMBOL_GPL(iomap_read_folio);
606 
607 static int iomap_readahead_iter(struct iomap_iter *iter,
608 		struct iomap_read_folio_ctx *ctx, size_t *cur_bytes_submitted)
609 {
610 	int ret;
611 
612 	while (iomap_length(iter)) {
613 		if (ctx->cur_folio &&
614 		    offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
615 			iomap_read_end(ctx->cur_folio, *cur_bytes_submitted);
616 			ctx->cur_folio = NULL;
617 		}
618 		if (!ctx->cur_folio) {
619 			ctx->cur_folio = readahead_folio(ctx->rac);
620 			if (WARN_ON_ONCE(!ctx->cur_folio))
621 				return -EINVAL;
622 			*cur_bytes_submitted = 0;
623 		}
624 		ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
625 		if (ret)
626 			return ret;
627 	}
628 
629 	return 0;
630 }
631 
632 /**
633  * iomap_readahead - Attempt to read pages from a file.
634  * @ops: The operations vector for the filesystem.
635  * @ctx: The ctx used for issuing readahead.
636  * @private: The filesystem-specific information for issuing iomap_iter.
637  *
638  * This function is for filesystems to call to implement their readahead
639  * address_space operation.
640  *
641  * Context: The @ops callbacks may submit I/O (eg to read the addresses of
642  * blocks from disc), and may wait for it.  The caller may be trying to
643  * access a different page, and so sleeping excessively should be avoided.
644  * It may allocate memory, but should avoid costly allocations.  This
645  * function is called with memalloc_nofs set, so allocations will not cause
646  * the filesystem to be reentered.
647  */
648 void iomap_readahead(const struct iomap_ops *ops,
649 		struct iomap_read_folio_ctx *ctx, void *private)
650 {
651 	struct readahead_control *rac = ctx->rac;
652 	struct iomap_iter iter = {
653 		.inode	= rac->mapping->host,
654 		.pos	= readahead_pos(rac),
655 		.len	= readahead_length(rac),
656 		.private = private,
657 	};
658 	size_t cur_bytes_submitted;
659 
660 	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
661 
662 	while (iomap_iter(&iter, ops) > 0)
663 		iter.status = iomap_readahead_iter(&iter, ctx,
664 					&cur_bytes_submitted);
665 
666 	if (ctx->ops->submit_read)
667 		ctx->ops->submit_read(ctx);
668 
669 	if (ctx->cur_folio)
670 		iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
671 }
672 EXPORT_SYMBOL_GPL(iomap_readahead);
673 
674 /*
675  * iomap_is_partially_uptodate checks whether blocks within a folio are
676  * uptodate or not.
677  *
678  * Returns true if all blocks which correspond to the specified part
679  * of the folio are uptodate.
680  */
681 bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
682 {
683 	struct iomap_folio_state *ifs = folio->private;
684 	struct inode *inode = folio->mapping->host;
685 	unsigned first, last;
686 
687 	if (!ifs)
688 		return false;
689 
690 	/* Caller's range may extend past the end of this folio */
691 	count = min(folio_size(folio) - from, count);
692 
693 	/* First and last blocks in range within folio */
694 	first = from >> inode->i_blkbits;
695 	last = (from + count - 1) >> inode->i_blkbits;
696 
697 	return ifs_next_nonuptodate_block(folio, first, last) > last;
698 }
699 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
700 
701 /**
702  * iomap_get_folio - get a folio reference for writing
703  * @iter: iteration structure
704  * @pos: start offset of write
705  * @len: Suggested size of folio to create.
706  *
707  * Returns a locked reference to the folio at @pos, or an error pointer if the
708  * folio could not be obtained.
709  */
710 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
711 {
712 	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
713 
714 	if (iter->flags & IOMAP_NOWAIT)
715 		fgp |= FGP_NOWAIT;
716 	if (iter->flags & IOMAP_DONTCACHE)
717 		fgp |= FGP_DONTCACHE;
718 	fgp |= fgf_set_order(len);
719 
720 	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
721 			fgp, mapping_gfp_mask(iter->inode->i_mapping));
722 }
723 EXPORT_SYMBOL_GPL(iomap_get_folio);
724 
725 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
726 {
727 	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
728 			folio_size(folio));
729 
730 	/*
731 	 * If the folio is dirty, we refuse to release our metadata because
732 	 * it may be partially dirty.  Once we track per-block dirty state,
733 	 * we can release the metadata if every block is dirty.
734 	 */
735 	if (folio_test_dirty(folio))
736 		return false;
737 	ifs_free(folio);
738 	return true;
739 }
740 EXPORT_SYMBOL_GPL(iomap_release_folio);
741 
742 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
743 {
744 	trace_iomap_invalidate_folio(folio->mapping->host,
745 					folio_pos(folio) + offset, len);
746 
747 	/*
748 	 * If we're invalidating the entire folio, clear the dirty state
749 	 * from it and release it to avoid unnecessary buildup of the LRU.
750 	 */
751 	if (offset == 0 && len == folio_size(folio)) {
752 		WARN_ON_ONCE(folio_test_writeback(folio));
753 		folio_cancel_dirty(folio);
754 		ifs_free(folio);
755 	}
756 }
757 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
758 
759 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
760 {
761 	struct inode *inode = mapping->host;
762 	size_t len = folio_size(folio);
763 
764 	ifs_alloc(inode, folio, 0);
765 	iomap_set_range_dirty(folio, 0, len);
766 	return filemap_dirty_folio(mapping, folio);
767 }
768 EXPORT_SYMBOL_GPL(iomap_dirty_folio);
769 
770 static void
771 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
772 {
773 	loff_t i_size = i_size_read(inode);
774 
775 	/*
776 	 * Only truncate newly allocated pages beyond EOF, even if the
777 	 * write started inside the existing inode size.
778 	 */
779 	if (pos + len > i_size)
780 		truncate_pagecache_range(inode, max(pos, i_size),
781 					 pos + len - 1);
782 }
783 
784 static int __iomap_write_begin(const struct iomap_iter *iter,
785 		const struct iomap_write_ops *write_ops, size_t len,
786 		struct folio *folio)
787 {
788 	struct iomap_folio_state *ifs;
789 	loff_t pos = iter->pos;
790 	loff_t block_size = i_blocksize(iter->inode);
791 	loff_t block_start = round_down(pos, block_size);
792 	loff_t block_end = round_up(pos + len, block_size);
793 	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
794 	size_t from = offset_in_folio(folio, pos), to = from + len;
795 	size_t poff, plen;
796 
797 	/*
798 	 * If the write or zeroing completely overlaps the current folio, then
799 	 * the entire folio will be dirtied so there is no need for
800 	 * per-block state tracking structures to be attached to this folio.
801 	 * For the unshare case, we must read in the ondisk contents because we
802 	 * are not changing pagecache contents.
803 	 */
804 	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
805 	    pos + len >= folio_next_pos(folio))
806 		return 0;
807 
808 	ifs = ifs_alloc(iter->inode, folio, iter->flags);
809 	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
810 		return -EAGAIN;
811 
812 	if (folio_test_uptodate(folio))
813 		return 0;
814 
815 	do {
816 		iomap_adjust_read_range(iter->inode, folio, &block_start,
817 				block_end - block_start, &poff, &plen);
818 		if (plen == 0)
819 			break;
820 
821 		/*
822 		 * If the read range will be entirely overwritten by the write,
823 		 * we can skip having to zero/read it in.
824 		 */
825 		if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
826 		    to >= poff + plen)
827 			continue;
828 
829 		if (iomap_block_needs_zeroing(iter, block_start)) {
830 			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
831 				return -EIO;
832 			folio_zero_segments(folio, poff, from, to, poff + plen);
833 		} else {
834 			int status;
835 
836 			if (iter->flags & IOMAP_NOWAIT)
837 				return -EAGAIN;
838 
839 			if (write_ops && write_ops->read_folio_range)
840 				status = write_ops->read_folio_range(iter,
841 						folio, block_start, plen);
842 			else
843 				status = iomap_bio_read_folio_range_sync(iter,
844 						folio, block_start, plen);
845 			if (status < 0)
846 				fserror_report_io(iter->inode,
847 						  FSERR_BUFFERED_READ, pos,
848 						  len, status, GFP_NOFS);
849 			if (status)
850 				return status;
851 		}
852 		iomap_set_range_uptodate(folio, poff, plen);
853 	} while ((block_start += plen) < block_end);
854 
855 	return 0;
856 }
857 
858 static struct folio *__iomap_get_folio(struct iomap_iter *iter,
859 		const struct iomap_write_ops *write_ops, size_t len)
860 {
861 	loff_t pos = iter->pos;
862 
863 	if (!mapping_large_folio_support(iter->inode->i_mapping))
864 		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
865 
866 	if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
867 		struct folio *folio = folio_batch_next(iter->fbatch);
868 
869 		if (!folio)
870 			return NULL;
871 
872 		/*
873 		 * The folio mapping generally shouldn't have changed based on
874 		 * fs locks, but be consistent with filemap lookup and retry
875 		 * the iter if it does.
876 		 */
877 		folio_lock(folio);
878 		if (unlikely(folio->mapping != iter->inode->i_mapping)) {
879 			iter->iomap.flags |= IOMAP_F_STALE;
880 			folio_unlock(folio);
881 			return NULL;
882 		}
883 
884 		folio_get(folio);
885 		folio_wait_stable(folio);
886 		return folio;
887 	}
888 
889 	if (write_ops && write_ops->get_folio)
890 		return write_ops->get_folio(iter, pos, len);
891 	return iomap_get_folio(iter, pos, len);
892 }
893 
894 static void __iomap_put_folio(struct iomap_iter *iter,
895 		const struct iomap_write_ops *write_ops, size_t ret,
896 		struct folio *folio)
897 {
898 	loff_t pos = iter->pos;
899 
900 	if (write_ops && write_ops->put_folio) {
901 		write_ops->put_folio(iter->inode, pos, ret, folio);
902 	} else {
903 		folio_unlock(folio);
904 		folio_put(folio);
905 	}
906 }
907 
908 /* trim pos and bytes to within a given folio */
909 static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
910 		struct folio *folio, size_t *offset, u64 *bytes)
911 {
912 	loff_t pos = iter->pos;
913 	size_t fsize = folio_size(folio);
914 
915 	WARN_ON_ONCE(pos < folio_pos(folio));
916 	WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);
917 
918 	*offset = offset_in_folio(folio, pos);
919 	*bytes = min(*bytes, fsize - *offset);
920 
921 	return pos;
922 }
923 
924 static int iomap_write_begin_inline(const struct iomap_iter *iter,
925 		struct folio *folio)
926 {
927 	/* needs more work for the tailpacking case; disable for now */
928 	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
929 		return -EIO;
930 	return iomap_read_inline_data(iter, folio);
931 }
932 
933 /*
934  * Grab and prepare a folio for write based on iter state. Returns the folio,
935  * offset, and length. Callers can optionally pass a max length *plen,
936  * otherwise init to zero.
937  */
938 static int iomap_write_begin(struct iomap_iter *iter,
939 		const struct iomap_write_ops *write_ops, struct folio **foliop,
940 		size_t *poffset, u64 *plen)
941 {
942 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
943 	loff_t pos;
944 	u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
945 	struct folio *folio;
946 	int status = 0;
947 
948 	len = min_not_zero(len, *plen);
949 	*foliop = NULL;
950 	*plen = 0;
951 
952 	if (fatal_signal_pending(current))
953 		return -EINTR;
954 
955 	folio = __iomap_get_folio(iter, write_ops, len);
956 	if (IS_ERR(folio))
957 		return PTR_ERR(folio);
958 
959 	/*
960 	 * No folio means we're done with a batch. We still have range to
961 	 * process so return and let the caller iterate and refill the batch.
962 	 */
963 	if (!folio) {
964 		WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
965 		return 0;
966 	}
967 
968 	/*
969 	 * Now we have a locked folio, before we do anything with it we need to
970 	 * check that the iomap we have cached is not stale. The inode extent
971 	 * mapping can change due to concurrent IO in flight (e.g.
972 	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
973 	 * reclaimed a previously partially written page at this index after IO
974 	 * completion before this write reaches this file offset) and hence we
975 	 * could do the wrong thing here (zero a page range incorrectly or fail
976 	 * to zero) and corrupt data.
977 	 */
978 	if (write_ops && write_ops->iomap_valid) {
979 		bool iomap_valid = write_ops->iomap_valid(iter->inode,
980 							 &iter->iomap);
981 		if (!iomap_valid) {
982 			iter->iomap.flags |= IOMAP_F_STALE;
983 			status = 0;
984 			goto out_unlock;
985 		}
986 	}
987 
988 	/*
989 	 * The folios in a batch may not be contiguous. If we've skipped
990 	 * forward, advance the iter to the pos of the current folio. If the
991 	 * folio starts beyond the end of the mapping, it may have been trimmed
992 	 * since the lookup for whatever reason. Return a NULL folio to
993 	 * terminate the op.
994 	 */
995 	if (folio_pos(folio) > iter->pos) {
996 		len = min_t(u64, folio_pos(folio) - iter->pos,
997 				 iomap_length(iter));
998 		status = iomap_iter_advance(iter, len);
999 		len = iomap_length(iter);
1000 		if (status || !len)
1001 			goto out_unlock;
1002 	}
1003 
1004 	pos = iomap_trim_folio_range(iter, folio, poffset, &len);
1005 
1006 	if (srcmap->type == IOMAP_INLINE)
1007 		status = iomap_write_begin_inline(iter, folio);
1008 	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
1009 		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
1010 	else
1011 		status = __iomap_write_begin(iter, write_ops, len, folio);
1012 
1013 	if (unlikely(status))
1014 		goto out_unlock;
1015 
1016 	*foliop = folio;
1017 	*plen = len;
1018 	return 0;
1019 
1020 out_unlock:
1021 	__iomap_put_folio(iter, write_ops, 0, folio);
1022 	return status;
1023 }
1024 
1025 static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
1026 		size_t copied, struct folio *folio)
1027 {
1028 	flush_dcache_folio(folio);
1029 
1030 	/*
1031 	 * The blocks that were entirely written will now be uptodate, so we
1032 	 * don't have to worry about a read_folio reading them and overwriting a
1033 	 * partial write.  However, if we've encountered a short write and only
1034 	 * partially written into a block, it will not be marked uptodate, so a
1035 	 * read_folio might come in and destroy our partial write.
1036 	 *
1037 	 * Do the simplest thing and just treat any short write to a
1038 	 * non-uptodate page as a zero-length write, and force the caller to
1039 	 * redo the whole thing.
1040 	 */
1041 	if (unlikely(copied < len && !folio_test_uptodate(folio)))
1042 		return false;
1043 	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
1044 	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
1045 	filemap_dirty_folio(inode->i_mapping, folio);
1046 	return true;
1047 }
1048 
1049 static bool iomap_write_end_inline(const struct iomap_iter *iter,
1050 		struct folio *folio, loff_t pos, size_t copied)
1051 {
1052 	const struct iomap *iomap = &iter->iomap;
1053 	void *addr;
1054 
1055 	WARN_ON_ONCE(!folio_test_uptodate(folio));
1056 	BUG_ON(!iomap_inline_data_valid(iomap));
1057 
1058 	if (WARN_ON_ONCE(!iomap->inline_data))
1059 		return false;
1060 
1061 	flush_dcache_folio(folio);
1062 	addr = kmap_local_folio(folio, pos);
1063 	memcpy(iomap_inline_data(iomap, pos), addr, copied);
1064 	kunmap_local(addr);
1065 
1066 	mark_inode_dirty(iter->inode);
1067 	return true;
1068 }
1069 
1070 /*
1071  * Returns true if all copied bytes have been written to the pagecache,
1072  * otherwise return false.
1073  */
1074 static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
1075 		struct folio *folio)
1076 {
1077 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1078 	loff_t pos = iter->pos;
1079 
1080 	if (srcmap->type == IOMAP_INLINE)
1081 		return iomap_write_end_inline(iter, folio, pos, copied);
1082 
1083 	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
1084 		size_t bh_written;
1085 
1086 		bh_written = block_write_end(pos, len, copied, folio);
1087 		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
1088 		return bh_written == copied;
1089 	}
1090 
1091 	return __iomap_write_end(iter->inode, pos, len, copied, folio);
1092 }
1093 
1094 static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
1095 		const struct iomap_write_ops *write_ops)
1096 {
1097 	ssize_t total_written = 0;
1098 	int status = 0;
1099 	struct address_space *mapping = iter->inode->i_mapping;
1100 	size_t chunk = mapping_max_folio_size(mapping);
1101 	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
1102 
1103 	do {
1104 		struct folio *folio;
1105 		loff_t old_size;
1106 		size_t offset;		/* Offset into folio */
1107 		u64 bytes;		/* Bytes to write to folio */
1108 		size_t copied;		/* Bytes copied from user */
1109 		u64 written;		/* Bytes have been written */
1110 		loff_t pos;
1111 
1112 		bytes = iov_iter_count(i);
1113 retry:
1114 		offset = iter->pos & (chunk - 1);
1115 		bytes = min(chunk - offset, bytes);
1116 		status = balance_dirty_pages_ratelimited_flags(mapping,
1117 							       bdp_flags);
1118 		if (unlikely(status))
1119 			break;
1120 
1121 		if (bytes > iomap_length(iter))
1122 			bytes = iomap_length(iter);
1123 
1124 		/*
1125 		 * Bring in the user page that we'll copy from _first_.
1126 		 * Otherwise there's a nasty deadlock on copying from the
1127 		 * same page as we're writing to, without it being marked
1128 		 * up-to-date.
1129 		 *
1130 		 * For async buffered writes the assumption is that the user
1131 		 * page has already been faulted in. This can be optimized by
1132 		 * faulting the user page.
1133 		 */
1134 		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
1135 			status = -EFAULT;
1136 			break;
1137 		}
1138 
1139 		status = iomap_write_begin(iter, write_ops, &folio, &offset,
1140 				&bytes);
1141 		if (unlikely(status)) {
1142 			iomap_write_failed(iter->inode, iter->pos, bytes);
1143 			break;
1144 		}
1145 		if (iter->iomap.flags & IOMAP_F_STALE)
1146 			break;
1147 
1148 		pos = iter->pos;
1149 
1150 		if (mapping_writably_mapped(mapping))
1151 			flush_dcache_folio(folio);
1152 
1153 		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
1154 		written = iomap_write_end(iter, bytes, copied, folio) ?
1155 			  copied : 0;
1156 
1157 		/*
1158 		 * Update the in-memory inode size after copying the data into
1159 		 * the page cache.  It's up to the file system to write the
1160 		 * updated size to disk, preferably after I/O completion so that
1161 		 * no stale data is exposed.  Only once that's done can we
1162 		 * unlock and release the folio.
1163 		 */
1164 		old_size = iter->inode->i_size;
1165 		if (pos + written > old_size) {
1166 			i_size_write(iter->inode, pos + written);
1167 			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
1168 		}
1169 		__iomap_put_folio(iter, write_ops, written, folio);
1170 
1171 		if (old_size < pos)
1172 			pagecache_isize_extended(iter->inode, old_size, pos);
1173 
1174 		cond_resched();
1175 		if (unlikely(written == 0)) {
1176 			/*
1177 			 * A short copy made iomap_write_end() reject the
1178 			 * thing entirely.  Might be memory poisoning
1179 			 * halfway through, might be a race with munmap,
1180 			 * might be severe memory pressure.
1181 			 */
1182 			iomap_write_failed(iter->inode, pos, bytes);
1183 			iov_iter_revert(i, copied);
1184 
1185 			if (chunk > PAGE_SIZE)
1186 				chunk /= 2;
1187 			if (copied) {
1188 				bytes = copied;
1189 				goto retry;
1190 			}
1191 		} else {
1192 			total_written += written;
1193 			iomap_iter_advance(iter, written);
1194 		}
1195 	} while (iov_iter_count(i) && iomap_length(iter));
1196 
1197 	return total_written ? 0 : status;
1198 }
1199 
1200 ssize_t
1201 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
1202 		const struct iomap_ops *ops,
1203 		const struct iomap_write_ops *write_ops, void *private)
1204 {
1205 	struct iomap_iter iter = {
1206 		.inode		= iocb->ki_filp->f_mapping->host,
1207 		.pos		= iocb->ki_pos,
1208 		.len		= iov_iter_count(i),
1209 		.flags		= IOMAP_WRITE,
1210 		.private	= private,
1211 	};
1212 	ssize_t ret;
1213 
1214 	if (iocb->ki_flags & IOCB_NOWAIT)
1215 		iter.flags |= IOMAP_NOWAIT;
1216 	if (iocb->ki_flags & IOCB_DONTCACHE)
1217 		iter.flags |= IOMAP_DONTCACHE;
1218 
1219 	while ((ret = iomap_iter(&iter, ops)) > 0)
1220 		iter.status = iomap_write_iter(&iter, i, write_ops);
1221 
1222 	if (unlikely(iter.pos == iocb->ki_pos))
1223 		return ret;
1224 	ret = iter.pos - iocb->ki_pos;
1225 	iocb->ki_pos = iter.pos;
1226 	return ret;
1227 }
1228 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
1229 
1230 static void iomap_write_delalloc_ifs_punch(struct inode *inode,
1231 		struct folio *folio, loff_t start_byte, loff_t end_byte,
1232 		struct iomap *iomap, iomap_punch_t punch)
1233 {
1234 	unsigned int first_blk, last_blk;
1235 	loff_t last_byte;
1236 	u8 blkbits = inode->i_blkbits;
1237 	struct iomap_folio_state *ifs;
1238 
1239 	/*
1240 	 * When we have per-block dirty tracking, there can be
1241 	 * blocks within a folio which are marked uptodate
1242 	 * but not dirty. In that case it is necessary to punch
1243 	 * out such blocks to avoid leaking any delalloc blocks.
1244 	 */
1245 	ifs = folio->private;
1246 	if (!ifs)
1247 		return;
1248 
1249 	last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
1250 	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
1251 	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
1252 	while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
1253 		       <= last_blk) {
1254 		punch(inode, folio_pos(folio) + (first_blk << blkbits),
1255 				1 << blkbits, iomap);
1256 		first_blk++;
1257 	}
1258 }
1259 
1260 static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
1261 		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1262 		struct iomap *iomap, iomap_punch_t punch)
1263 {
1264 	if (!folio_test_dirty(folio))
1265 		return;
1266 
1267 	/* if dirty, punch up to offset */
1268 	if (start_byte > *punch_start_byte) {
1269 		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
1270 				iomap);
1271 	}
1272 
1273 	/* Punch non-dirty blocks within folio */
1274 	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
1275 			iomap, punch);
1276 
1277 	/*
1278 	 * Make sure the next punch start is correctly bound to
1279 	 * the end of this data range, not the end of the folio.
1280 	 */
1281 	*punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
1282 }
1283 
1284 /*
1285  * Scan the data range passed to us for dirty page cache folios. If we find a
1286  * dirty folio, punch out the preceding range and update the offset from which
1287  * the next punch will start from.
1288  *
1289  * We can punch out storage reservations under clean pages because they either
1290  * contain data that has been written back - in which case the delalloc punch
1291  * over that range is a no-op - or they were populated by read faults, in which case they
1292  * contain zeroes and we can remove the delalloc backing range and any new
1293  * writes to those pages will do the normal hole filling operation...
1294  *
1295  * This makes the logic simple: we only need to keep the delalloc extents
1296  * over the dirty ranges of the page cache.
1297  *
1298  * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1299  * simplify range iterations.
1300  */
1301 static void iomap_write_delalloc_scan(struct inode *inode,
1302 		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1303 		struct iomap *iomap, iomap_punch_t punch)
1304 {
1305 	while (start_byte < end_byte) {
1306 		struct folio	*folio;
1307 
1308 		/* grab locked page */
1309 		folio = filemap_lock_folio(inode->i_mapping,
1310 				start_byte >> PAGE_SHIFT);
1311 		if (IS_ERR(folio)) {
1312 			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
1313 					PAGE_SIZE;
1314 			continue;
1315 		}
1316 
1317 		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
1318 				start_byte, end_byte, iomap, punch);
1319 
1320 		/* move offset to start of next folio in range */
1321 		start_byte = folio_next_pos(folio);
1322 		folio_unlock(folio);
1323 		folio_put(folio);
1324 	}
1325 }
1326 
1327 /*
1328  * When a short write occurs, the filesystem might need to use ->iomap_end
1329  * to remove space reservations created in ->iomap_begin.
1330  *
1331  * For filesystems that use delayed allocation, there can be dirty pages over
1332  * the delalloc extent outside the range of a short write but still within the
1333  * delalloc extent allocated for this iomap if the write raced with page
1334  * faults.
1335  *
1336  * Punch out all the delalloc blocks in the range given except for those that
1337  * have dirty data still pending in the page cache - those are going to be
1338  * written and so must still retain the delalloc backing for writeback.
1339  *
1340  * The punch() callback *must* only punch delalloc extents in the range passed
1341  * to it. It must skip over all other types of extents in the range and leave
1342  * them completely unchanged. It must do this punch atomically with respect to
1343  * other extent modifications.
1344  *
1345  * The punch() callback may be called with a folio locked to prevent writeback
1346  * extent allocation racing at the edge of the range we are currently punching.
1347  * The locked folio may or may not cover the range being punched, so it is not
1348  * safe for the punch() callback to lock folios itself.
1349  *
1350  * Lock order is:
1351  *
1352  * inode->i_rwsem (shared or exclusive)
1353  *   inode->i_mapping->invalidate_lock (exclusive)
1354  *     folio_lock()
1355  *       ->punch
1356  *         internal filesystem allocation lock
1357  *
1358  * As we are scanning the page cache for data, we don't need to reimplement the
1359  * wheel - mapping_seek_hole_data() does exactly what we need to identify the
1360  * start and end of data ranges correctly even for sub-folio block sizes. This
1361  * byte range based iteration is especially convenient because it means we
1362  * don't have to care about variable size folios, nor where the start or end of
1363  * the data range lies within a folio, if they lie within the same folio or even
1364  * if there are multiple discontiguous data ranges within the folio.
1365  *
1366  * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
1367  * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
1368  * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
1369  * date. A write page fault can then mark it dirty. If we then fail a write()
1370  * beyond EOF into that up to date cached range, we allocate a delalloc block
1371  * beyond EOF and then have to punch it out. Because the range is up to date,
1372  * mapping_seek_hole_data() will return it, and we will skip the punch because
1373  * the folio is dirty. This is incorrect - we always need to punch out delalloc
1374  * beyond EOF in this case as writeback will never write back and convert that
1375  * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
1376  * resulting in always punching out the range from the EOF to the end of the
1377  * range the iomap spans.
1378  *
1379  * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
1380  * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
1381  * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
1382  * returns the end of the data range (data_end). Using closed intervals would
1383  * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
1384  * the code to subtle off-by-one bugs....
1385  */
1386 void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
1387 		loff_t end_byte, unsigned flags, struct iomap *iomap,
1388 		iomap_punch_t punch)
1389 {
1390 	loff_t punch_start_byte = start_byte;
1391 	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
1392 
1393 	/*
1394 	 * The caller must hold invalidate_lock to avoid races with page faults
1395 	 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
1396 	 * we walk the cache and perform delalloc extent removal.  Failing to do
1397 	 * this can leave dirty pages with no space reservation in the cache.
1398 	 */
1399 	lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);
1400 
1401 	while (start_byte < scan_end_byte) {
1402 		loff_t		data_end;
1403 
1404 		start_byte = mapping_seek_hole_data(inode->i_mapping,
1405 				start_byte, scan_end_byte, SEEK_DATA);
1406 		/*
1407 		 * If there is no more data to scan, all that is left is to
1408 		 * punch out the remaining range.
1409 		 *
1410 		 * Note that mapping_seek_hole_data is only supposed to return
1411 		 * either an offset or -ENXIO, so WARN on any other error as
1412 		 * that would be an API change without updating the callers.
1413 		 */
1414 		if (start_byte == -ENXIO || start_byte == scan_end_byte)
1415 			break;
1416 		if (WARN_ON_ONCE(start_byte < 0))
1417 			return;
1418 		WARN_ON_ONCE(start_byte < punch_start_byte);
1419 		WARN_ON_ONCE(start_byte > scan_end_byte);
1420 
1421 		/*
1422 		 * We find the end of this contiguous cached data range by
1423 		 * seeking from start_byte to the beginning of the next hole.
1424 		 */
1425 		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
1426 				scan_end_byte, SEEK_HOLE);
1427 		if (WARN_ON_ONCE(data_end < 0))
1428 			return;
1429 
1430 		/*
1431 		 * If we race with post-direct I/O invalidation of the page cache,
1432 		 * there might be no data left at start_byte.
1433 		 */
1434 		if (data_end == start_byte)
1435 			continue;
1436 
1437 		WARN_ON_ONCE(data_end < start_byte);
1438 		WARN_ON_ONCE(data_end > scan_end_byte);
1439 
1440 		iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
1441 				data_end, iomap, punch);
1442 
1443 		/* The next data search starts at the end of this one. */
1444 		start_byte = data_end;
1445 	}
1446 
1447 	if (punch_start_byte < end_byte)
1448 		punch(inode, punch_start_byte, end_byte - punch_start_byte,
1449 				iomap);
1450 }
1451 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
1452 
1453 static int iomap_unshare_iter(struct iomap_iter *iter,
1454 		const struct iomap_write_ops *write_ops)
1455 {
1456 	struct iomap *iomap = &iter->iomap;
1457 	u64 bytes = iomap_length(iter);
1458 	int status;
1459 
1460 	if (!iomap_want_unshare_iter(iter))
1461 		return iomap_iter_advance(iter, bytes);
1462 
1463 	do {
1464 		struct folio *folio;
1465 		size_t offset;
1466 		bool ret;
1467 
1468 		bytes = min_t(u64, SIZE_MAX, bytes);
1469 		status = iomap_write_begin(iter, write_ops, &folio, &offset,
1470 				&bytes);
1471 		if (unlikely(status))
1472 			return status;
1473 		if (iomap->flags & IOMAP_F_STALE)
1474 			break;
1475 
1476 		ret = iomap_write_end(iter, bytes, bytes, folio);
1477 		__iomap_put_folio(iter, write_ops, bytes, folio);
1478 		if (WARN_ON_ONCE(!ret))
1479 			return -EIO;
1480 
1481 		cond_resched();
1482 
1483 		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1484 
1485 		status = iomap_iter_advance(iter, bytes);
1486 		if (status)
1487 			break;
1488 	} while ((bytes = iomap_length(iter)) > 0);
1489 
1490 	return status;
1491 }
1492 
1493 int
1494 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1495 		const struct iomap_ops *ops,
1496 		const struct iomap_write_ops *write_ops)
1497 {
1498 	struct iomap_iter iter = {
1499 		.inode		= inode,
1500 		.pos		= pos,
1501 		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
1502 	};
1503 	loff_t size = i_size_read(inode);
1504 	int ret;
1505 
1506 	if (pos < 0 || pos >= size)
1507 		return 0;
1508 
1509 	iter.len = min(len, size - pos);
1510 	while ((ret = iomap_iter(&iter, ops)) > 0)
1511 		iter.status = iomap_unshare_iter(&iter, write_ops);
1512 	return ret;
1513 }
1514 EXPORT_SYMBOL_GPL(iomap_file_unshare);
1515 
1516 /*
1517  * Flush the remaining range of the iter and mark the current mapping stale.
1518  * This is used when zero range sees an unwritten mapping that may have had
1519  * dirty pagecache over it.
1520  */
1521 static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
1522 {
1523 	struct address_space *mapping = i->inode->i_mapping;
1524 	loff_t end = i->pos + i->len - 1;
1525 
1526 	i->iomap.flags |= IOMAP_F_STALE;
1527 	return filemap_write_and_wait_range(mapping, i->pos, end);
1528 }
1529 
iomap_zero_iter(struct iomap_iter * iter,bool * did_zero,const struct iomap_write_ops * write_ops)1530 static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
1531 		const struct iomap_write_ops *write_ops)
1532 {
1533 	u64 bytes = iomap_length(iter);
1534 	int status;
1535 
1536 	do {
1537 		struct folio *folio;
1538 		size_t offset;
1539 		bool ret;
1540 
1541 		bytes = min_t(u64, SIZE_MAX, bytes);
1542 		status = iomap_write_begin(iter, write_ops, &folio, &offset,
1543 				&bytes);
1544 		if (status)
1545 			return status;
1546 		if (iter->iomap.flags & IOMAP_F_STALE)
1547 			break;
1548 
1549 		/* a NULL folio means we're done with a folio batch */
1550 		/* a NULL folio means we're done with the folio batch */
1551 			status = iomap_iter_advance_full(iter);
1552 			break;
1553 		}
1554 
1555 		/* warn about zeroing folios beyond eof that won't write back */
1556 		WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
1557 
1558 		trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
1559 				bytes);
1560 
1561 		folio_zero_range(folio, offset, bytes);
1562 		folio_mark_accessed(folio);
1563 
1564 		ret = iomap_write_end(iter, bytes, bytes, folio);
1565 		__iomap_put_folio(iter, write_ops, bytes, folio);
1566 		if (WARN_ON_ONCE(!ret))
1567 			return -EIO;
1568 
1569 		status = iomap_iter_advance(iter, bytes);
1570 		if (status)
1571 			break;
1572 	} while ((bytes = iomap_length(iter)) > 0);
1573 
1574 	if (did_zero)
1575 		*did_zero = true;
1576 	return status;
1577 }
1578 
1579 /**
1580  * iomap_fill_dirty_folios - fill a folio batch with dirty folios
1581  * @iter: Iteration structure
1582  * @start: Start offset of range. Updated based on lookup progress.
1583  * @end: End offset of range
1584  * @iomap_flags: Flags to set on the associated iomap to track the batch.
1585  *
1586  * Returns the folio count directly. If the batch lookup is performed, the
1587  * associated control flag and the expected offset of a subsequent lookup
1588  * are also returned via the out parameters. The caller is responsible for
1589  * setting the flag on the associated iomap.
1590  */
1591 unsigned int
iomap_fill_dirty_folios(struct iomap_iter * iter,loff_t * start,loff_t end,unsigned int * iomap_flags)1592 iomap_fill_dirty_folios(
1593 	struct iomap_iter	*iter,
1594 	loff_t			*start,
1595 	loff_t			end,
1596 	unsigned int		*iomap_flags)
1597 {
1598 	struct address_space	*mapping = iter->inode->i_mapping;
1599 	pgoff_t			pstart = *start >> PAGE_SHIFT;
1600 	pgoff_t			pend = (end - 1) >> PAGE_SHIFT;
1601 	unsigned int		count;
1602 
1603 	if (!iter->fbatch) {
1604 		*start = end;
1605 		return 0;
1606 	}
1607 
1608 	count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
1609 	*start = (pstart << PAGE_SHIFT);
1610 	*iomap_flags |= IOMAP_F_FOLIO_BATCH;
1611 	return count;
1612 }
1613 EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
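
/*
 * Hedged usage sketch (illustrative only): a filesystem's ->iomap_begin
 * implementation that is about to report an unwritten mapping for a zero
 * range could batch up the dirty folios over that mapping first, e.g.:
 *
 *	unsigned int	iomap_flags = 0;
 *	loff_t		start = offset;
 *
 *	count = iomap_fill_dirty_folios(iter, &start, offset + length,
 *			&iomap_flags);
 *	iomap->flags |= iomap_flags;
 *
 * On return, start holds the offset where a subsequent lookup should begin,
 * which the caller can use to size the mapping it reports.  All local names
 * above are hypothetical.
 */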
1614 
1615 int
iomap_zero_range(struct inode * inode,loff_t pos,loff_t len,bool * did_zero,const struct iomap_ops * ops,const struct iomap_write_ops * write_ops,void * private)1616 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1617 		const struct iomap_ops *ops,
1618 		const struct iomap_write_ops *write_ops, void *private)
1619 {
1620 	struct folio_batch fbatch;
1621 	struct iomap_iter iter = {
1622 		.inode		= inode,
1623 		.pos		= pos,
1624 		.len		= len,
1625 		.flags		= IOMAP_ZERO,
1626 		.private	= private,
1627 		.fbatch		= &fbatch,
1628 	};
1629 	struct address_space *mapping = inode->i_mapping;
1630 	int ret;
1631 	bool range_dirty;
1632 
1633 	folio_batch_init(&fbatch);
1634 
1635 	/*
1636 	 * To avoid an unconditional flush, check pagecache state and only flush
1637 	 * if dirty and the fs returns a mapping that might convert on
1638 	 * writeback.
1639 	 */
1640 	range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
1641 					iter.pos + iter.len - 1);
1642 	while ((ret = iomap_iter(&iter, ops)) > 0) {
1643 		const struct iomap *srcmap = iomap_iter_srcmap(&iter);
1644 
1645 		if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
1646 				 srcmap->type != IOMAP_UNWRITTEN))
1647 			return -EIO;
1648 
1649 		if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
1650 		    (srcmap->type == IOMAP_HOLE ||
1651 		     srcmap->type == IOMAP_UNWRITTEN)) {
1652 			s64 status;
1653 
1654 			if (range_dirty) {
1655 				range_dirty = false;
1656 				status = iomap_zero_iter_flush_and_stale(&iter);
1657 			} else {
1658 				status = iomap_iter_advance_full(&iter);
1659 			}
1660 			iter.status = status;
1661 			continue;
1662 		}
1663 
1664 		iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
1665 	}
1666 	return ret;
1667 }
1668 EXPORT_SYMBOL_GPL(iomap_zero_range);
1669 
1670 int
iomap_truncate_page(struct inode * inode,loff_t pos,bool * did_zero,const struct iomap_ops * ops,const struct iomap_write_ops * write_ops,void * private)1671 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1672 		const struct iomap_ops *ops,
1673 		const struct iomap_write_ops *write_ops, void *private)
1674 {
1675 	unsigned int blocksize = i_blocksize(inode);
1676 	unsigned int off = pos & (blocksize - 1);
1677 
1678 	/* Block boundary? Nothing to do */
1679 	if (!off)
1680 		return 0;
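
	/*
	 * Worked example (assuming a 4096-byte block size): for pos == 6144,
	 * off is 2048, so the remaining 2048 bytes of that block (file
	 * offsets 6144..8191) are zeroed below.
	 */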
1681 	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
1682 			write_ops, private);
1683 }
1684 EXPORT_SYMBOL_GPL(iomap_truncate_page);
1685 
iomap_folio_mkwrite_iter(struct iomap_iter * iter,struct folio * folio)1686 static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1687 		struct folio *folio)
1688 {
1689 	loff_t length = iomap_length(iter);
1690 	int ret;
1691 
1692 	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1693 		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1694 					      &iter->iomap);
1695 		if (ret)
1696 			return ret;
1697 		block_commit_write(folio, 0, length);
1698 	} else {
1699 		WARN_ON_ONCE(!folio_test_uptodate(folio));
1700 		folio_mark_dirty(folio);
1701 	}
1702 
1703 	return iomap_iter_advance(iter, length);
1704 }
1705 
iomap_page_mkwrite(struct vm_fault * vmf,const struct iomap_ops * ops,void * private)1706 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
1707 		void *private)
1708 {
1709 	struct iomap_iter iter = {
1710 		.inode		= file_inode(vmf->vma->vm_file),
1711 		.flags		= IOMAP_WRITE | IOMAP_FAULT,
1712 		.private	= private,
1713 	};
1714 	struct folio *folio = page_folio(vmf->page);
1715 	ssize_t ret;
1716 
1717 	folio_lock(folio);
1718 	ret = folio_mkwrite_check_truncate(folio, iter.inode);
1719 	if (ret < 0)
1720 		goto out_unlock;
1721 	iter.pos = folio_pos(folio);
1722 	iter.len = ret;
1723 	while ((ret = iomap_iter(&iter, ops)) > 0)
1724 		iter.status = iomap_folio_mkwrite_iter(&iter, folio);
1725 
1726 	if (ret < 0)
1727 		goto out_unlock;
1728 	folio_wait_stable(folio);
1729 	return VM_FAULT_LOCKED;
1730 out_unlock:
1731 	folio_unlock(folio);
1732 	return vmf_fs_error(ret);
1733 }
1734 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
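
/*
 * Hedged usage sketch (hypothetical names throughout): a filesystem wires
 * iomap_page_mkwrite() up as its ->page_mkwrite handler, typically behind a
 * small wrapper that takes whatever locks the fs needs around the fault:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops, NULL);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */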
1735 
iomap_writeback_init(struct inode * inode,struct folio * folio)1736 static void iomap_writeback_init(struct inode *inode, struct folio *folio)
1737 {
1738 	struct iomap_folio_state *ifs = folio->private;
1739 
1740 	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1741 	if (ifs) {
1742 		WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
1743 		/*
1744 		 * Set this to the folio size. After processing the folio for
1745 		 * writeback in iomap_writeback_folio(), we'll subtract any
1746 		 * ranges not written back.
1747 		 *
1748 		 * We do this because otherwise, we would have to atomically
1749 		 * increment ifs->write_bytes_pending every time a range in the
1750 		 * folio needs to be written back.
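		 *
		 * For example, for a 16k folio this starts at 16384; if only
		 * 8k worth of blocks end up being submitted, the remaining
		 * 8192 bytes are subtracted again via
		 * iomap_finish_folio_write(), so the writeback bit clears
		 * once the submitted I/O completes.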
1751 		 */
1752 		atomic_set(&ifs->write_bytes_pending, folio_size(folio));
1753 	}
1754 }
1755 
iomap_finish_folio_write(struct inode * inode,struct folio * folio,size_t len)1756 void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1757 		size_t len)
1758 {
1759 	struct iomap_folio_state *ifs = folio->private;
1760 
1761 	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1762 	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1763 
1764 	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1765 		folio_end_writeback(folio);
1766 }
1767 EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
1768 
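/*
 * Write back one dirty range of the folio by calling ->writeback_range
 * repeatedly until all of @rlen has been consumed.  Each call must handle a
 * non-zero number of bytes no larger than what it was asked for; anything
 * else indicates a filesystem bug.  Ranges that map to a hole are consumed
 * but do not count toward @bytes_submitted.
 */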
iomap_writeback_range(struct iomap_writepage_ctx * wpc,struct folio * folio,u64 pos,u32 rlen,u64 end_pos,size_t * bytes_submitted)1769 static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
1770 		struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
1771 		size_t *bytes_submitted)
1772 {
1773 	do {
1774 		ssize_t ret;
1775 
1776 		ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
1777 		if (WARN_ON_ONCE(ret == 0 || ret > rlen))
1778 			return -EIO;
1779 		if (ret < 0)
1780 			return ret;
1781 		rlen -= ret;
1782 		pos += ret;
1783 
1784 		/*
1785 		 * Holes are not written back by ->writeback_range, so track
1786 		 * whether we handled anything that is not a hole here.
1787 		 */
1788 		if (wpc->iomap.type != IOMAP_HOLE)
1789 			*bytes_submitted += ret;
1790 	} while (rlen);
1791 
1792 	return 0;
1793 }
1794 
1795 /*
1796  * Check interaction of the folio with the file end.
1797  *
1798  * If the folio is entirely beyond i_size, return false.  If it straddles
1799  * i_size, adjust end_pos and zero all data beyond i_size.
1800  */
iomap_writeback_handle_eof(struct folio * folio,struct inode * inode,u64 * end_pos)1801 static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
1802 		u64 *end_pos)
1803 {
1804 	u64 isize = i_size_read(inode);
1805 
1806 	if (*end_pos > isize) {
1807 		size_t poff = offset_in_folio(folio, isize);
1808 		pgoff_t end_index = isize >> PAGE_SHIFT;
1809 
1810 		/*
1811 		 * If the folio is entirely outside of i_size, skip it.
1812 		 *
1813 		 * This can happen due to a truncate operation that is in
1814 		 * progress and in that case truncate will finish it off once
1815 		 * we've dropped the folio lock.
1816 		 *
1817 		 * Note that the pgoff_t used for end_index is an unsigned long.
1818 		 * If the given offset is greater than 16TB on a 32-bit system,
1819 		 * then if we checked if the folio is fully outside i_size with
1820 		 * "if (folio->index >= end_index + 1)", "end_index + 1" would
1821 		 * overflow and evaluate to 0.  Hence this folio would be
1822 		 * redirtied and written out repeatedly, which would result in
1823 		 * an infinite loop; the user program performing this operation
1824 		 * would hang.  Instead, we can detect this situation by
1825 		 * checking if the folio is totally beyond i_size or if its
1826 		 * offset is just equal to the EOF.
1827 		 */
1828 		if (folio->index > end_index ||
1829 		    (folio->index == end_index && poff == 0))
1830 			return false;
1831 
1832 		/*
1833 		 * The folio straddles i_size.
1834 		 *
1835 		 * It must be zeroed out on each and every writepage invocation
1836 		 * because it may be mmapped:
1837 		 *
1838 		 *    A file is mapped in multiples of the page size.  For a
1839 		 *    file that is not a multiple of the page size, the
1840 		 *    remaining memory is zeroed when mapped, and writes to that
1841 		 *    region are not written out to the file.
1842 		 *
1843 		 * Also adjust the end_pos to the end of file and skip writeback
1844 		 * for all blocks entirely beyond i_size.
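		 *
		 * Worked example: for a 4096-byte folio at file offset 8192
		 * and an i_size of 10000, poff is 1808, so bytes 1808..4095
		 * of the folio are zeroed and *end_pos is pulled back from
		 * 12288 to 10000.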
1845 		 */
1846 		folio_zero_segment(folio, poff, folio_size(folio));
1847 		*end_pos = isize;
1848 	}
1849 
1850 	return true;
1851 }
1852 
iomap_writeback_folio(struct iomap_writepage_ctx * wpc,struct folio * folio)1853 int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
1854 {
1855 	struct iomap_folio_state *ifs = folio->private;
1856 	struct inode *inode = wpc->inode;
1857 	u64 pos = folio_pos(folio);
1858 	u64 end_pos = pos + folio_size(folio);
1859 	u64 end_aligned = 0;
1860 	loff_t orig_pos = pos;
1861 	size_t bytes_submitted = 0;
1862 	int error = 0;
1863 	u32 rlen;
1864 
1865 	WARN_ON_ONCE(!folio_test_locked(folio));
1866 	WARN_ON_ONCE(folio_test_dirty(folio));
1867 	WARN_ON_ONCE(folio_test_writeback(folio));
1868 
1869 	trace_iomap_writeback_folio(inode, pos, folio_size(folio));
1870 
1871 	if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
1872 		return 0;
1873 	WARN_ON_ONCE(end_pos <= pos);
1874 
1875 	if (i_blocks_per_folio(inode, folio) > 1) {
1876 		if (!ifs) {
1877 			ifs = ifs_alloc(inode, folio, 0);
1878 			iomap_set_range_dirty(folio, 0, end_pos - pos);
1879 		}
1880 
1881 		iomap_writeback_init(inode, folio);
1882 	}
1883 
1884 	/*
1885 	 * Set the writeback bit ASAP, as the I/O completion for the single
1886 	 * block per folio case can happen as soon as we submit the bio.
1887 	 */
1888 	folio_start_writeback(folio);
1889 
1890 	/*
1891 	 * Walk through the folio to find dirty areas to write back.
1892 	 */
1893 	end_aligned = round_up(end_pos, i_blocksize(inode));
1894 	while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
1895 		error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
1896 				&bytes_submitted);
1897 		if (error)
1898 			break;
1899 		pos += rlen;
1900 	}
1901 
1902 	if (bytes_submitted)
1903 		wpc->nr_folios++;
1904 	if (error && pos > orig_pos)
1905 		fserror_report_io(inode, FSERR_BUFFERED_WRITE, orig_pos, 0,
1906 				  error, GFP_NOFS);
1907 
1908 	/*
1909 	 * We can have dirty bits set past the end of file in the page_mkwrite path
1910 	 * while mapping the last partial folio. Hence it's better to clear
1911 	 * all the dirty bits in the folio here.
1912 	 */
1913 	iomap_clear_range_dirty(folio, 0, folio_size(folio));
1914 
1915 	/*
1916 	 * Usually the writeback bit is cleared by the I/O completion handler.
1917 	 * But we may end up not writing back any blocks at all, or (when
1918 	 * there are multiple blocks in a folio) all I/O might have finished
1919 	 * already at this point.  In that case we need to clear the
1920 	 * writeback bit ourselves here.
1921 	 */
1922 	if (ifs) {
1923 		/*
1924 		 * Subtract any bytes that were initially accounted to
1925 		 * write_bytes_pending but skipped for writeback.
1926 		 */
1927 		size_t bytes_not_submitted = folio_size(folio) -
1928 				bytes_submitted;
1929 
1930 		if (bytes_not_submitted)
1931 			iomap_finish_folio_write(inode, folio,
1932 					bytes_not_submitted);
1933 	} else if (!bytes_submitted) {
1934 		folio_end_writeback(folio);
1935 	}
1936 
1937 	mapping_set_error(inode->i_mapping, error);
1938 	return error;
1939 }
1940 EXPORT_SYMBOL_GPL(iomap_writeback_folio);
1941 
1942 int
iomap_writepages(struct iomap_writepage_ctx * wpc)1943 iomap_writepages(struct iomap_writepage_ctx *wpc)
1944 {
1945 	struct address_space *mapping = wpc->inode->i_mapping;
1946 	struct folio *folio = NULL;
1947 	int error;
1948 
1949 	/*
1950 	 * Writeback from reclaim context should never happen except in the case
1951 	 * of a VM regression, so warn about it and refuse to write the data.
1952 	 */
1953 	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
1954 			PF_MEMALLOC))
1955 		return -EIO;
1956 
1957 	while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
1958 		error = iomap_writeback_folio(wpc, folio);
1959 		folio_unlock(folio);
1960 	}
1961 
1962 	/*
1963 	 * If @error is non-zero, it means that we have a situation where some
1964 	 * part of the submission process has failed after we've marked pages
1965 	 * for writeback.
1966 	 *
1967 	 * We cannot cancel the writeback directly in that case, so always call
1968 	 * ->writeback_submit to run the I/O completion handler to clear the
1969 	 * writeback bit and let the file system proess the errors.
1970 	 * writeback bit and let the file system process the errors.
1971 	if (wpc->wb_ctx)
1972 		return wpc->ops->writeback_submit(wpc, error);
1973 	return error;
1974 }
1975 EXPORT_SYMBOL_GPL(iomap_writepages);
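
/*
 * Hedged usage sketch (hypothetical names): a filesystem's ->writepages
 * method is typically a thin wrapper that sets up the writeback context and
 * calls iomap_writepages(), along the lines of:
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = {
 *			.inode	= mapping->host,
 *			.wbc	= wbc,
 *			.ops	= &myfs_writeback_ops,
 *		};
 *
 *		return iomap_writepages(&wpc);
 *	}
 */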
1976