1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (C) 2016-2023 Christoph Hellwig.
5 */
6 #include <linux/iomap.h>
7 #include <linux/buffer_head.h>
8 #include <linux/writeback.h>
9 #include <linux/swap.h>
10 #include <linux/migrate.h>
11 #include <linux/fserror.h>
12 #include "internal.h"
13 #include "trace.h"
14
15 #include "../internal.h"
16
17 /*
18 * Structure allocated for each folio to track per-block uptodate, dirty state
19 * and I/O completions.
20 */
21 struct iomap_folio_state {
22 spinlock_t state_lock;
23 unsigned int read_bytes_pending;
24 atomic_t write_bytes_pending;
25
26 /*
27 * Each block has two bits in this bitmap:
28 * Bits [0..blocks_per_folio) hold the uptodate status.
29 * Bits [b_p_f...(2*b_p_f)) hold the dirty status.
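 *
 * For example (illustrative numbers, not from this file): with a 16k folio
 * and 4k blocks, blocks_per_folio is 4, so bits 0-3 hold the per-block
 * uptodate state and bits 4-7 hold the per-block dirty state.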
30 */
31 unsigned long state[];
32 };
33
34 static inline bool ifs_is_fully_uptodate(struct folio *folio,
35 struct iomap_folio_state *ifs)
36 {
37 struct inode *inode = folio->mapping->host;
38
39 return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
40 }
41
42 /*
43 * Find the next uptodate block in the folio. end_blk is inclusive.
44 * If no uptodate block is found, this will return end_blk + 1.
45 */
46 static unsigned ifs_next_uptodate_block(struct folio *folio,
47 unsigned start_blk, unsigned end_blk)
48 {
49 struct iomap_folio_state *ifs = folio->private;
50
51 return find_next_bit(ifs->state, end_blk + 1, start_blk);
52 }
53
54 /*
55 * Find the next non-uptodate block in the folio. end_blk is inclusive.
56 * If no non-uptodate block is found, this will return end_blk + 1.
57 */
58 static unsigned ifs_next_nonuptodate_block(struct folio *folio,
59 unsigned start_blk, unsigned end_blk)
60 {
61 struct iomap_folio_state *ifs = folio->private;
62
63 return find_next_zero_bit(ifs->state, end_blk + 1, start_blk);
64 }
65
66 static bool ifs_set_range_uptodate(struct folio *folio,
67 struct iomap_folio_state *ifs, size_t off, size_t len)
68 {
69 struct inode *inode = folio->mapping->host;
70 unsigned int first_blk = off >> inode->i_blkbits;
71 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
72 unsigned int nr_blks = last_blk - first_blk + 1;
73
74 bitmap_set(ifs->state, first_blk, nr_blks);
75 return ifs_is_fully_uptodate(folio, ifs);
76 }
77
78 static void iomap_set_range_uptodate(struct folio *folio, size_t off,
79 size_t len)
80 {
81 struct iomap_folio_state *ifs = folio->private;
82 unsigned long flags;
83 bool mark_uptodate = true;
84
85 if (folio_test_uptodate(folio))
86 return;
87
88 if (ifs) {
89 spin_lock_irqsave(&ifs->state_lock, flags);
90 /*
91 * If a read with bytes pending is in progress, we must not call
92 * folio_mark_uptodate(). The read completion path
93 * (iomap_read_end()) will call folio_end_read(), which uses XOR
94 * semantics to set the uptodate bit. If we set it here, the XOR
95 * in folio_end_read() will clear it, leaving the folio not
96 * uptodate.
97 */
98 mark_uptodate = ifs_set_range_uptodate(folio, ifs, off, len) &&
99 !ifs->read_bytes_pending;
100 spin_unlock_irqrestore(&ifs->state_lock, flags);
101 }
102
103 if (mark_uptodate)
104 folio_mark_uptodate(folio);
105 }
106
107 /*
108 * Find the next dirty block in the folio. end_blk is inclusive.
109 * If no dirty block is found, this will return end_blk + 1.
110 */
111 static unsigned ifs_next_dirty_block(struct folio *folio,
112 unsigned start_blk, unsigned end_blk)
113 {
114 struct iomap_folio_state *ifs = folio->private;
115 struct inode *inode = folio->mapping->host;
116 unsigned int blks = i_blocks_per_folio(inode, folio);
117
118 return find_next_bit(ifs->state, blks + end_blk + 1,
119 blks + start_blk) - blks;
120 }
121
122 /*
123 * Find the next clean block in the folio. end_blk is inclusive.
124 * If no clean block is found, this will return end_blk + 1.
125 */
126 static unsigned ifs_next_clean_block(struct folio *folio,
127 unsigned start_blk, unsigned end_blk)
128 {
129 struct iomap_folio_state *ifs = folio->private;
130 struct inode *inode = folio->mapping->host;
131 unsigned int blks = i_blocks_per_folio(inode, folio);
132
133 return find_next_zero_bit(ifs->state, blks + end_blk + 1,
134 blks + start_blk) - blks;
135 }
136
137 static unsigned ifs_find_dirty_range(struct folio *folio,
138 struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
139 {
140 struct inode *inode = folio->mapping->host;
141 unsigned start_blk =
142 offset_in_folio(folio, *range_start) >> inode->i_blkbits;
143 unsigned end_blk = min_not_zero(
144 offset_in_folio(folio, range_end) >> inode->i_blkbits,
145 i_blocks_per_folio(inode, folio)) - 1;
146 unsigned nblks;
147
148 start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
149 if (start_blk > end_blk)
150 return 0;
151 if (start_blk == end_blk)
152 nblks = 1;
153 else
154 nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
155 start_blk;
156
157 *range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
158 return nblks << inode->i_blkbits;
159 }
160
161 static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
162 u64 range_end)
163 {
164 struct iomap_folio_state *ifs = folio->private;
165
166 if (*range_start >= range_end)
167 return 0;
168
169 if (ifs)
170 return ifs_find_dirty_range(folio, ifs, range_start, range_end);
171 return range_end - *range_start;
172 }
173
174 static void ifs_clear_range_dirty(struct folio *folio,
175 struct iomap_folio_state *ifs, size_t off, size_t len)
176 {
177 struct inode *inode = folio->mapping->host;
178 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
179 unsigned int first_blk = (off >> inode->i_blkbits);
180 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
181 unsigned int nr_blks = last_blk - first_blk + 1;
182 unsigned long flags;
183
184 spin_lock_irqsave(&ifs->state_lock, flags);
185 bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
186 spin_unlock_irqrestore(&ifs->state_lock, flags);
187 }
188
189 static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
190 {
191 struct iomap_folio_state *ifs = folio->private;
192
193 if (ifs)
194 ifs_clear_range_dirty(folio, ifs, off, len);
195 }
196
197 static void ifs_set_range_dirty(struct folio *folio,
198 struct iomap_folio_state *ifs, size_t off, size_t len)
199 {
200 struct inode *inode = folio->mapping->host;
201 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
202 unsigned int first_blk = (off >> inode->i_blkbits);
203 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
204 unsigned int nr_blks = last_blk - first_blk + 1;
205 unsigned long flags;
206
207 spin_lock_irqsave(&ifs->state_lock, flags);
208 bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
209 spin_unlock_irqrestore(&ifs->state_lock, flags);
210 }
211
212 static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
213 {
214 struct iomap_folio_state *ifs = folio->private;
215
216 if (ifs)
217 ifs_set_range_dirty(folio, ifs, off, len);
218 }
219
220 static struct iomap_folio_state *ifs_alloc(struct inode *inode,
221 struct folio *folio, unsigned int flags)
222 {
223 struct iomap_folio_state *ifs = folio->private;
224 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
225 gfp_t gfp;
226
227 if (ifs || nr_blocks <= 1)
228 return ifs;
229
230 if (flags & IOMAP_NOWAIT)
231 gfp = GFP_NOWAIT;
232 else
233 gfp = GFP_NOFS | __GFP_NOFAIL;
234
235 /*
236 * ifs->state tracks two sets of state flags when the
237 * filesystem block size is smaller than the folio size.
238 * The first state tracks per-block uptodate and the
239 * second tracks per-block dirty state.
240 */
241 ifs = kzalloc_flex(*ifs, state, BITS_TO_LONGS(2 * nr_blocks), gfp);
242 if (!ifs)
243 return ifs;
244
245 spin_lock_init(&ifs->state_lock);
246 if (folio_test_uptodate(folio))
247 bitmap_set(ifs->state, 0, nr_blocks);
248 if (folio_test_dirty(folio))
249 bitmap_set(ifs->state, nr_blocks, nr_blocks);
250 folio_attach_private(folio, ifs);
251
252 return ifs;
253 }
254
255 static void ifs_free(struct folio *folio)
256 {
257 struct iomap_folio_state *ifs = folio_detach_private(folio);
258
259 if (!ifs)
260 return;
261 WARN_ON_ONCE(ifs->read_bytes_pending != 0);
262 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
263 WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
264 folio_test_uptodate(folio));
265 kfree(ifs);
266 }
267
268 /*
269 * Calculate how many bytes to truncate based on the number of blocks to
270 * truncate and the end position to start truncating from.
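 *
 * A worked example (illustrative numbers): with 1k blocks (block_bits == 10),
 * end_pos == 4396 and blocks_truncated == 3, block_offset is 300, so we
 * truncate two full blocks plus the 300 bytes of the partial one:
 * (3 - 1) * 1024 + 300 == 2348 bytes.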
271 */
272 static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits,
273 unsigned blocks_truncated)
274 {
275 unsigned block_size = 1 << block_bits;
276 unsigned block_offset = end_pos & (block_size - 1);
277
278 if (!block_offset)
279 return blocks_truncated << block_bits;
280
281 return ((blocks_truncated - 1) << block_bits) + block_offset;
282 }
283
284 /*
285 * Calculate the range inside the folio that we actually need to read.
286 */
287 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
288 loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
289 {
290 struct iomap_folio_state *ifs = folio->private;
291 loff_t orig_pos = *pos;
292 loff_t isize = i_size_read(inode);
293 unsigned block_bits = inode->i_blkbits;
294 unsigned block_size = (1 << block_bits);
295 size_t poff = offset_in_folio(folio, *pos);
296 size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
297 size_t orig_plen = plen;
298 unsigned first = poff >> block_bits;
299 unsigned last = (poff + plen - 1) >> block_bits;
300
301 /*
302 * If the block size is smaller than the page size, we need to check the
303 * per-block uptodate status and adjust the offset and length if needed
304 * to avoid reading in already uptodate ranges.
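 *
 * For example (illustrative numbers): for an 8k folio with 1k blocks where
 * blocks 0-1 and 6-7 are already uptodate, a full-folio read request is
 * trimmed to poff == 2048 and plen == 4096, covering only blocks 2-5.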
305 */
306 if (ifs) {
307 unsigned int next, blocks_skipped;
308
309 next = ifs_next_nonuptodate_block(folio, first, last);
310 blocks_skipped = next - first;
311
312 if (blocks_skipped) {
313 unsigned long block_offset = *pos & (block_size - 1);
314 unsigned bytes_skipped =
315 (blocks_skipped << block_bits) - block_offset;
316
317 *pos += bytes_skipped;
318 poff += bytes_skipped;
319 plen -= bytes_skipped;
320 }
321 first = next;
322
323 /* truncate len if we find any trailing uptodate block(s) */
324 if (++next <= last) {
325 next = ifs_next_uptodate_block(folio, next, last);
326 if (next <= last) {
327 plen -= iomap_bytes_to_truncate(*pos + plen,
328 block_bits, last - next + 1);
329 last = next - 1;
330 }
331 }
332 }
333
334 /*
335 * If the extent spans the block that contains the i_size, we need to
336 * handle both halves separately so that we properly zero data in the
337 * page cache for blocks that are entirely outside of i_size.
338 */
339 if (orig_pos <= isize && orig_pos + orig_plen > isize) {
340 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
341
342 if (first <= end && last > end)
343 plen -= iomap_bytes_to_truncate(*pos + plen, block_bits,
344 last - end);
345 }
346
347 *offp = poff;
348 *lenp = plen;
349 }
350
351 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
352 loff_t pos)
353 {
354 const struct iomap *srcmap = iomap_iter_srcmap(iter);
355
356 return srcmap->type != IOMAP_MAPPED ||
357 (srcmap->flags & IOMAP_F_NEW) ||
358 pos >= i_size_read(iter->inode);
359 }
360
361 /**
362 * iomap_read_inline_data - copy inline data into the page cache
363 * @iter: iteration structure
364 * @folio: folio to copy to
365 *
366 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
367 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
368 * Returns zero for success to complete the read, or the usual negative errno.
369 */
370 static int iomap_read_inline_data(const struct iomap_iter *iter,
371 struct folio *folio)
372 {
373 const struct iomap *iomap = iomap_iter_srcmap(iter);
374 size_t size = i_size_read(iter->inode) - iomap->offset;
375 size_t offset = offset_in_folio(folio, iomap->offset);
376
377 if (WARN_ON_ONCE(!iomap->inline_data))
378 return -EIO;
379
380 if (folio_test_uptodate(folio))
381 return 0;
382
383 if (WARN_ON_ONCE(size > iomap->length)) {
384 fserror_report_io(iter->inode, FSERR_BUFFERED_READ,
385 iomap->offset, size, -EIO, GFP_NOFS);
386 return -EIO;
387 }
388 if (offset > 0)
389 ifs_alloc(iter->inode, folio, iter->flags);
390
391 folio_fill_tail(folio, offset, iomap->inline_data, size);
392 iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
393 return 0;
394 }
395
396 void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
397 int error)
398 {
399 struct iomap_folio_state *ifs = folio->private;
400 bool uptodate = !error;
401 bool finished = true;
402
403 if (ifs) {
404 unsigned long flags;
405
406 spin_lock_irqsave(&ifs->state_lock, flags);
407 if (!error)
408 uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
409 ifs->read_bytes_pending -= len;
410 finished = !ifs->read_bytes_pending;
411 spin_unlock_irqrestore(&ifs->state_lock, flags);
412 }
413
414 if (error)
415 fserror_report_io(folio->mapping->host, FSERR_BUFFERED_READ,
416 folio_pos(folio) + off, len, error,
417 GFP_ATOMIC);
418
419 if (finished)
420 folio_end_read(folio, uptodate);
421 }
422 EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
423
424 static void iomap_read_init(struct folio *folio)
425 {
426 struct iomap_folio_state *ifs = folio->private;
427
428 if (ifs) {
429 /*
430 * ifs->read_bytes_pending is used to track how many bytes are
431 * read in asynchronously by the IO helper. We need to track
432 * this so that we can know when the IO helper has finished
433 * reading in all the necessary ranges of the folio and can end
434 * the read.
435 *
436 * Increase ->read_bytes_pending by the folio size to start.
437 * We'll subtract any uptodate / zeroed ranges that did not
438 * require IO in iomap_read_end() after we're done processing
439 * the folio.
440 *
441 * We do this because otherwise, we would have to increment
442 * ifs->read_bytes_pending every time a range in the folio needs
443 * to be read in, which can get expensive since the spinlock
444 * needs to be held whenever modifying ifs->read_bytes_pending.
445 */
446 spin_lock_irq(&ifs->state_lock);
447 WARN_ON_ONCE(ifs->read_bytes_pending != 0);
448 ifs->read_bytes_pending = folio_size(folio);
449 spin_unlock_irq(&ifs->state_lock);
450 }
451 }
452
453 /*
454 * This ends IO if no bytes were submitted to an IO helper.
455 *
456 * Otherwise, this calibrates ifs->read_bytes_pending to represent only the
457 * submitted bytes (see comment in iomap_read_init()). If all bytes submitted
458 * have already been completed by the IO helper, then this will end the read.
459 * Else the IO helper will end the read after all submitted ranges have been
460 * read.
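 *
 * For example (illustrative numbers): for a 16k folio where only one 4k
 * range needed IO, iomap_read_init() set read_bytes_pending to 16384 and
 * bytes_submitted is 4096, so we subtract 12288 here. If the 4k IO has
 * already completed, read_bytes_pending drops to zero and we end the read;
 * otherwise the IO helper ends it on completion.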
461 */
462 static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
463 {
464 struct iomap_folio_state *ifs = folio->private;
465
466 if (ifs) {
467 bool end_read, uptodate;
468
469 spin_lock_irq(&ifs->state_lock);
470 if (!ifs->read_bytes_pending) {
471 WARN_ON_ONCE(bytes_submitted);
472 spin_unlock_irq(&ifs->state_lock);
473 folio_unlock(folio);
474 return;
475 }
476
477 /*
478 * Subtract any bytes that were initially accounted to
479 * read_bytes_pending but skipped for IO.
480 */
481 ifs->read_bytes_pending -= folio_size(folio) - bytes_submitted;
482
483 /*
484 * If !ifs->read_bytes_pending, this means all pending reads by
485 * the IO helper have already completed, which means we need to
486 * end the folio read here. If ifs->read_bytes_pending != 0,
487 * the IO helper will end the folio read.
488 */
489 end_read = !ifs->read_bytes_pending;
490 if (end_read)
491 uptodate = ifs_is_fully_uptodate(folio, ifs);
492 spin_unlock_irq(&ifs->state_lock);
493 if (end_read)
494 folio_end_read(folio, uptodate);
495 } else {
496 /*
497 * If a folio without an ifs is submitted to the IO helper, the
498 * read must be on the entire folio and the IO helper takes
499 * ownership of the folio. This means we should only enter
500 * iomap_read_end() for the !ifs case if no bytes were submitted
501 * to the IO helper, in which case we are responsible for
502 * unlocking the folio here.
503 */
504 WARN_ON_ONCE(bytes_submitted);
505 folio_unlock(folio);
506 }
507 }
508
509 static int iomap_read_folio_iter(struct iomap_iter *iter,
510 struct iomap_read_folio_ctx *ctx, size_t *bytes_submitted)
511 {
512 const struct iomap *iomap = &iter->iomap;
513 loff_t pos = iter->pos;
514 loff_t length = iomap_length(iter);
515 struct folio *folio = ctx->cur_folio;
516 size_t folio_len = folio_size(folio);
517 struct iomap_folio_state *ifs;
518 size_t poff, plen;
519 loff_t pos_diff;
520 int ret;
521
522 if (iomap->type == IOMAP_INLINE) {
523 ret = iomap_read_inline_data(iter, folio);
524 if (ret)
525 return ret;
526 return iomap_iter_advance(iter, length);
527 }
528
529 ifs = ifs_alloc(iter->inode, folio, iter->flags);
530
531 length = min_t(loff_t, length, folio_len - offset_in_folio(folio, pos));
532 while (length) {
533 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
534 &plen);
535
536 pos_diff = pos - iter->pos;
537 if (WARN_ON_ONCE(pos_diff + plen > length))
538 return -EIO;
539
540 ret = iomap_iter_advance(iter, pos_diff);
541 if (ret)
542 return ret;
543
544 if (plen == 0)
545 return 0;
546
547 /* zero post-eof blocks as the page may be mapped */
548 if (iomap_block_needs_zeroing(iter, pos)) {
549 folio_zero_range(folio, poff, plen);
550 iomap_set_range_uptodate(folio, poff, plen);
551 } else {
552 if (!*bytes_submitted)
553 iomap_read_init(folio);
554 ret = ctx->ops->read_folio_range(iter, ctx, plen);
555 if (ret < 0)
556 fserror_report_io(iter->inode,
557 FSERR_BUFFERED_READ, pos,
558 plen, ret, GFP_NOFS);
559 if (ret)
560 return ret;
561
562 *bytes_submitted += plen;
563 /*
564 * Hand off folio ownership to the IO helper when:
565 * 1) The entire folio has been submitted for IO, or
566 * 2) There is no ifs attached to the folio
567 *
568 * Case (2) occurs when 1 << i_blkbits matches the folio
569 * size but the underlying filesystem or block device
570 * uses a smaller granularity for IO.
571 */
572 if (*bytes_submitted == folio_len || !ifs)
573 ctx->cur_folio = NULL;
574 }
575
576 ret = iomap_iter_advance(iter, plen);
577 if (ret)
578 return ret;
579 length -= pos_diff + plen;
580 pos = iter->pos;
581 }
582 return 0;
583 }
584
585 void iomap_read_folio(const struct iomap_ops *ops,
586 struct iomap_read_folio_ctx *ctx, void *private)
587 {
588 struct folio *folio = ctx->cur_folio;
589 struct iomap_iter iter = {
590 .inode = folio->mapping->host,
591 .pos = folio_pos(folio),
592 .len = folio_size(folio),
593 .private = private,
594 };
595 size_t bytes_submitted = 0;
596 int ret;
597
598 trace_iomap_readpage(iter.inode, 1);
599
600 while ((ret = iomap_iter(&iter, ops)) > 0)
601 iter.status = iomap_read_folio_iter(&iter, ctx,
602 &bytes_submitted);
603
604 if (ctx->ops->submit_read)
605 ctx->ops->submit_read(ctx);
606
607 if (ctx->cur_folio)
608 iomap_read_end(ctx->cur_folio, bytes_submitted);
609 }
610 EXPORT_SYMBOL_GPL(iomap_read_folio);
611
612 static int iomap_readahead_iter(struct iomap_iter *iter,
613 struct iomap_read_folio_ctx *ctx, size_t *cur_bytes_submitted)
614 {
615 int ret;
616
617 while (iomap_length(iter)) {
618 if (ctx->cur_folio &&
619 offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
620 iomap_read_end(ctx->cur_folio, *cur_bytes_submitted);
621 ctx->cur_folio = NULL;
622 }
623 if (!ctx->cur_folio) {
624 ctx->cur_folio = readahead_folio(ctx->rac);
625 if (WARN_ON_ONCE(!ctx->cur_folio))
626 return -EINVAL;
627 *cur_bytes_submitted = 0;
628 }
629 ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
630 if (ret)
631 return ret;
632 }
633
634 return 0;
635 }
636
637 /**
638 * iomap_readahead - Attempt to read pages from a file.
639 * @ops: The operations vector for the filesystem.
640 * @ctx: The ctx used for issuing readahead.
641 * @private: The filesystem-specific information for issuing iomap_iter.
642 *
643 * This function is for filesystems to call to implement their readahead
644 * address_space operation.
645 *
646 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
647 * blocks from disc), and may wait for it. The caller may be trying to
648 * access a different page, and so sleeping excessively should be avoided.
649 * It may allocate memory, but should avoid costly allocations. This
650 * function is called with memalloc_nofs set, so allocations will not cause
651 * the filesystem to be reentered.
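 *
 * A minimal caller sketch (the myfs_* names are illustrative, not a real
 * filesystem; only the .ops and .rac members used by this file are shown):
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		struct iomap_read_folio_ctx ctx = {
 *			.ops	= &myfs_read_folio_ops,
 *			.rac	= rac,
 *		};
 *
 *		iomap_readahead(&myfs_iomap_ops, &ctx, NULL);
 *	}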
652 */
653 void iomap_readahead(const struct iomap_ops *ops,
654 struct iomap_read_folio_ctx *ctx, void *private)
655 {
656 struct readahead_control *rac = ctx->rac;
657 struct iomap_iter iter = {
658 .inode = rac->mapping->host,
659 .pos = readahead_pos(rac),
660 .len = readahead_length(rac),
661 .private = private,
662 };
663 size_t cur_bytes_submitted;
664
665 trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
666
667 while (iomap_iter(&iter, ops) > 0)
668 iter.status = iomap_readahead_iter(&iter, ctx,
669 &cur_bytes_submitted);
670
671 if (ctx->ops->submit_read)
672 ctx->ops->submit_read(ctx);
673
674 if (ctx->cur_folio)
675 iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
676 }
677 EXPORT_SYMBOL_GPL(iomap_readahead);
678
679 /*
680 * iomap_is_partially_uptodate checks whether blocks within a folio are
681 * uptodate or not.
682 *
683 * Returns true if all blocks which correspond to the specified part
684 * of the folio are uptodate.
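 *
 * Filesystems using iomap typically wire this up directly in their
 * address_space_operations, e.g. (illustrative):
 *	.is_partially_uptodate	= iomap_is_partially_uptodate,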
685 */
686 bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
687 {
688 struct iomap_folio_state *ifs = folio->private;
689 struct inode *inode = folio->mapping->host;
690 unsigned first, last;
691
692 if (!ifs)
693 return false;
694
695 /* Caller's range may extend past the end of this folio */
696 count = min(folio_size(folio) - from, count);
697
698 /* First and last blocks in range within folio */
699 first = from >> inode->i_blkbits;
700 last = (from + count - 1) >> inode->i_blkbits;
701
702 return ifs_next_nonuptodate_block(folio, first, last) > last;
703 }
704 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
705
706 /**
707 * iomap_get_folio - get a folio reference for writing
708 * @iter: iteration structure
709 * @pos: start offset of write
710 * @len: Suggested size of folio to create.
711 *
712 * Returns a locked reference to the folio at @pos, or an error pointer if the
713 * folio could not be obtained.
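 *
 * A ->get_folio hook in struct iomap_write_ops can fall back to this helper;
 * a sketch with hypothetical myfs_* names:
 *
 *	static struct folio *myfs_get_folio(struct iomap_iter *iter,
 *			loff_t pos, size_t len)
 *	{
 *		myfs_prepare_write(iter->inode, pos, len);
 *		return iomap_get_folio(iter, pos, len);
 *	}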
714 */
715 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
716 {
717 fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
718
719 if (iter->flags & IOMAP_NOWAIT)
720 fgp |= FGP_NOWAIT;
721 if (iter->flags & IOMAP_DONTCACHE)
722 fgp |= FGP_DONTCACHE;
723 fgp |= fgf_set_order(len);
724
725 return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
726 fgp, mapping_gfp_mask(iter->inode->i_mapping));
727 }
728 EXPORT_SYMBOL_GPL(iomap_get_folio);
729
730 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
731 {
732 trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
733 folio_size(folio));
734
735 /*
736 * If the folio is dirty, we refuse to release our metadata because
737 * it may be partially dirty. Once we track per-block dirty state,
738 * we can release the metadata if every block is dirty.
739 */
740 if (folio_test_dirty(folio))
741 return false;
742 ifs_free(folio);
743 return true;
744 }
745 EXPORT_SYMBOL_GPL(iomap_release_folio);
746
747 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
748 {
749 trace_iomap_invalidate_folio(folio->mapping->host,
750 folio_pos(folio) + offset, len);
751
752 /*
753 * If we're invalidating the entire folio, clear the dirty state
754 * from it and release it to avoid unnecessary buildup of the LRU.
755 */
756 if (offset == 0 && len == folio_size(folio)) {
757 WARN_ON_ONCE(folio_test_writeback(folio));
758 folio_cancel_dirty(folio);
759 ifs_free(folio);
760 }
761 }
762 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
763
764 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
765 {
766 struct inode *inode = mapping->host;
767 size_t len = folio_size(folio);
768
769 ifs_alloc(inode, folio, 0);
770 iomap_set_range_dirty(folio, 0, len);
771 return filemap_dirty_folio(mapping, folio);
772 }
773 EXPORT_SYMBOL_GPL(iomap_dirty_folio);
774
775 static void
776 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
777 {
778 loff_t i_size = i_size_read(inode);
779
780 /*
781 * Only truncate newly allocated pages beyond EOF, even if the
782 * write started inside the existing inode size.
783 */
784 if (pos + len > i_size)
785 truncate_pagecache_range(inode, max(pos, i_size),
786 pos + len - 1);
787 }
788
789 static int __iomap_write_begin(const struct iomap_iter *iter,
790 const struct iomap_write_ops *write_ops, size_t len,
791 struct folio *folio)
792 {
793 struct iomap_folio_state *ifs;
794 loff_t pos = iter->pos;
795 loff_t block_size = i_blocksize(iter->inode);
796 loff_t block_start = round_down(pos, block_size);
797 loff_t block_end = round_up(pos + len, block_size);
798 unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
799 size_t from = offset_in_folio(folio, pos), to = from + len;
800 size_t poff, plen;
801
802 /*
803 * If the write or zeroing completely overlaps the current folio, then the
804 * entire folio will be dirtied, so there is no need for
805 * per-block state tracking structures to be attached to this folio.
806 * For the unshare case, we must read in the on-disk contents because we
807 * are not changing pagecache contents.
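 *
 * For example (illustrative numbers): a 300-byte write at offset 100 into a
 * clean folio with 4k blocks only partially covers block 0, so that block
 * must first be read in (or zeroed for a hole/unwritten mapping) before the
 * copied bytes can be merged into it.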
808 */
809 if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
810 pos + len >= folio_next_pos(folio))
811 return 0;
812
813 ifs = ifs_alloc(iter->inode, folio, iter->flags);
814 if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
815 return -EAGAIN;
816
817 if (folio_test_uptodate(folio))
818 return 0;
819
820 do {
821 iomap_adjust_read_range(iter->inode, folio, &block_start,
822 block_end - block_start, &poff, &plen);
823 if (plen == 0)
824 break;
825
826 /*
827 * If the read range will be entirely overwritten by the write,
828 * we can skip having to zero/read it in.
829 */
830 if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
831 to >= poff + plen)
832 continue;
833
834 if (iomap_block_needs_zeroing(iter, block_start)) {
835 if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
836 return -EIO;
837 folio_zero_segments(folio, poff, from, to, poff + plen);
838 } else {
839 int status;
840
841 if (iter->flags & IOMAP_NOWAIT)
842 return -EAGAIN;
843
844 if (write_ops && write_ops->read_folio_range)
845 status = write_ops->read_folio_range(iter,
846 folio, block_start, plen);
847 else
848 status = iomap_bio_read_folio_range_sync(iter,
849 folio, block_start, plen);
850 if (status < 0)
851 fserror_report_io(iter->inode,
852 FSERR_BUFFERED_READ, pos,
853 len, status, GFP_NOFS);
854 if (status)
855 return status;
856 }
857 iomap_set_range_uptodate(folio, poff, plen);
858 } while ((block_start += plen) < block_end);
859
860 return 0;
861 }
862
863 static struct folio *__iomap_get_folio(struct iomap_iter *iter,
864 const struct iomap_write_ops *write_ops, size_t len)
865 {
866 loff_t pos = iter->pos;
867
868 if (!mapping_large_folio_support(iter->inode->i_mapping))
869 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
870
871 if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
872 struct folio *folio = folio_batch_next(iter->fbatch);
873
874 if (!folio)
875 return NULL;
876
877 /*
878 * The folio mapping generally shouldn't have changed based on
879 * fs locks, but be consistent with filemap lookup and retry
880 * the iter if it does.
881 */
882 folio_lock(folio);
883 if (unlikely(folio->mapping != iter->inode->i_mapping)) {
884 iter->iomap.flags |= IOMAP_F_STALE;
885 folio_unlock(folio);
886 return NULL;
887 }
888
889 folio_get(folio);
890 folio_wait_stable(folio);
891 return folio;
892 }
893
894 if (write_ops && write_ops->get_folio)
895 return write_ops->get_folio(iter, pos, len);
896 return iomap_get_folio(iter, pos, len);
897 }
898
899 static void __iomap_put_folio(struct iomap_iter *iter,
900 const struct iomap_write_ops *write_ops, size_t ret,
901 struct folio *folio)
902 {
903 loff_t pos = iter->pos;
904
905 if (write_ops && write_ops->put_folio) {
906 write_ops->put_folio(iter->inode, pos, ret, folio);
907 } else {
908 folio_unlock(folio);
909 folio_put(folio);
910 }
911 }
912
913 /* trim pos and bytes to within a given folio */
914 static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
915 struct folio *folio, size_t *offset, u64 *bytes)
916 {
917 loff_t pos = iter->pos;
918 size_t fsize = folio_size(folio);
919
920 WARN_ON_ONCE(pos < folio_pos(folio));
921 WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);
922
923 *offset = offset_in_folio(folio, pos);
924 *bytes = min(*bytes, fsize - *offset);
925
926 return pos;
927 }
928
929 static int iomap_write_begin_inline(const struct iomap_iter *iter,
930 struct folio *folio)
931 {
932 /* needs more work for the tailpacking case; disable for now */
933 if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
934 return -EIO;
935 return iomap_read_inline_data(iter, folio);
936 }
937
938 /*
939 * Grab and prepare a folio for write based on iter state. Returns the folio,
940 * offset, and length. Callers can optionally pass a max length *plen,
941 * otherwise init to zero.
942 */
943 static int iomap_write_begin(struct iomap_iter *iter,
944 const struct iomap_write_ops *write_ops, struct folio **foliop,
945 size_t *poffset, u64 *plen)
946 {
947 const struct iomap *srcmap = iomap_iter_srcmap(iter);
948 loff_t pos;
949 u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
950 struct folio *folio;
951 int status = 0;
952
953 len = min_not_zero(len, *plen);
954 *foliop = NULL;
955 *plen = 0;
956
957 if (fatal_signal_pending(current))
958 return -EINTR;
959
960 folio = __iomap_get_folio(iter, write_ops, len);
961 if (IS_ERR(folio))
962 return PTR_ERR(folio);
963
964 /*
965 * No folio means we're done with a batch. We still have range to
966 * process so return and let the caller iterate and refill the batch.
967 */
968 if (!folio) {
969 WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
970 return 0;
971 }
972
973 /*
974 * Now we have a locked folio, before we do anything with it we need to
975 * check that the iomap we have cached is not stale. The inode extent
976 * mapping can change due to concurrent IO in flight (e.g.
977 * IOMAP_UNWRITTEN state can change and memory reclaim could have
978 * reclaimed a previously partially written page at this index after IO
979 * completion before this write reaches this file offset) and hence we
980 * could do the wrong thing here (zero a page range incorrectly or fail
981 * to zero) and corrupt data.
982 */
983 if (write_ops && write_ops->iomap_valid) {
984 bool iomap_valid = write_ops->iomap_valid(iter->inode,
985 &iter->iomap);
986 if (!iomap_valid) {
987 iter->iomap.flags |= IOMAP_F_STALE;
988 status = 0;
989 goto out_unlock;
990 }
991 }
992
993 /*
994 * The folios in a batch may not be contiguous. If we've skipped
995 * forward, advance the iter to the pos of the current folio. If the
996 * folio starts beyond the end of the mapping, it may have been trimmed
997 * since the lookup for whatever reason. Return a NULL folio to
998 * terminate the op.
999 */
1000 if (folio_pos(folio) > iter->pos) {
1001 len = min_t(u64, folio_pos(folio) - iter->pos,
1002 iomap_length(iter));
1003 status = iomap_iter_advance(iter, len);
1004 len = iomap_length(iter);
1005 if (status || !len)
1006 goto out_unlock;
1007 }
1008
1009 pos = iomap_trim_folio_range(iter, folio, poffset, &len);
1010
1011 if (srcmap->type == IOMAP_INLINE)
1012 status = iomap_write_begin_inline(iter, folio);
1013 else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
1014 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
1015 else
1016 status = __iomap_write_begin(iter, write_ops, len, folio);
1017
1018 if (unlikely(status))
1019 goto out_unlock;
1020
1021 *foliop = folio;
1022 *plen = len;
1023 return 0;
1024
1025 out_unlock:
1026 __iomap_put_folio(iter, write_ops, 0, folio);
1027 return status;
1028 }
1029
1030 static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
1031 size_t copied, struct folio *folio)
1032 {
1033 flush_dcache_folio(folio);
1034
1035 /*
1036 * The blocks that were entirely written will now be uptodate, so we
1037 * don't have to worry about a read_folio reading them and overwriting a
1038 * partial write. However, if we've encountered a short write and only
1039 * partially written into a block, it will not be marked uptodate, so a
1040 * read_folio might come in and destroy our partial write.
1041 *
1042 * Do the simplest thing and just treat any short write to a
1043 * non-uptodate page as a zero-length write, and force the caller to
1044 * redo the whole thing.
1045 */
1046 if (unlikely(copied < len && !folio_test_uptodate(folio)))
1047 return false;
1048 iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
1049 iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
1050 filemap_dirty_folio(inode->i_mapping, folio);
1051 return true;
1052 }
1053
1054 static bool iomap_write_end_inline(const struct iomap_iter *iter,
1055 struct folio *folio, loff_t pos, size_t copied)
1056 {
1057 const struct iomap *iomap = &iter->iomap;
1058 void *addr;
1059
1060 WARN_ON_ONCE(!folio_test_uptodate(folio));
1061 BUG_ON(!iomap_inline_data_valid(iomap));
1062
1063 if (WARN_ON_ONCE(!iomap->inline_data))
1064 return false;
1065
1066 flush_dcache_folio(folio);
1067 addr = kmap_local_folio(folio, pos);
1068 memcpy(iomap_inline_data(iomap, pos), addr, copied);
1069 kunmap_local(addr);
1070
1071 mark_inode_dirty(iter->inode);
1072 return true;
1073 }
1074
1075 /*
1076 * Returns true if all copied bytes have been written to the pagecache,
1077 * otherwise return false.
1078 */
1079 static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
1080 struct folio *folio)
1081 {
1082 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1083 loff_t pos = iter->pos;
1084
1085 if (srcmap->type == IOMAP_INLINE)
1086 return iomap_write_end_inline(iter, folio, pos, copied);
1087
1088 if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
1089 size_t bh_written;
1090
1091 bh_written = block_write_end(pos, len, copied, folio);
1092 WARN_ON_ONCE(bh_written != copied && bh_written != 0);
1093 return bh_written == copied;
1094 }
1095
1096 return __iomap_write_end(iter->inode, pos, len, copied, folio);
1097 }
1098
1099 static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
1100 const struct iomap_write_ops *write_ops)
1101 {
1102 ssize_t total_written = 0;
1103 int status = 0;
1104 struct address_space *mapping = iter->inode->i_mapping;
1105 size_t chunk = mapping_max_folio_size(mapping);
1106 unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
1107
1108 do {
1109 struct folio *folio;
1110 loff_t old_size;
1111 size_t offset; /* Offset into folio */
1112 u64 bytes; /* Bytes to write to folio */
1113 size_t copied; /* Bytes copied from user */
1114 u64 written; /* Bytes have been written */
1115 loff_t pos;
1116
1117 bytes = iov_iter_count(i);
1118 retry:
1119 offset = iter->pos & (chunk - 1);
1120 bytes = min(chunk - offset, bytes);
1121 status = balance_dirty_pages_ratelimited_flags(mapping,
1122 bdp_flags);
1123 if (unlikely(status))
1124 break;
1125
1126 if (bytes > iomap_length(iter))
1127 bytes = iomap_length(iter);
1128
1129 /*
1130 * Bring in the user page that we'll copy from _first_.
1131 * Otherwise there's a nasty deadlock on copying from the
1132 * same page as we're writing to, without it being marked
1133 * up-to-date.
1134 *
1135 * For async buffered writes the assumption is that the user
1136 * page has already been faulted in. This can be optimized by
1137 * faulting the user page.
1138 */
1139 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
1140 status = -EFAULT;
1141 break;
1142 }
1143
1144 status = iomap_write_begin(iter, write_ops, &folio, &offset,
1145 &bytes);
1146 if (unlikely(status)) {
1147 iomap_write_failed(iter->inode, iter->pos, bytes);
1148 break;
1149 }
1150 if (iter->iomap.flags & IOMAP_F_STALE)
1151 break;
1152
1153 pos = iter->pos;
1154
1155 if (mapping_writably_mapped(mapping))
1156 flush_dcache_folio(folio);
1157
1158 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
1159 written = iomap_write_end(iter, bytes, copied, folio) ?
1160 copied : 0;
1161
1162 /*
1163 * Update the in-memory inode size after copying the data into
1164 * the page cache. It's up to the file system to write the
1165 * updated size to disk, preferably after I/O completion so that
1166 * no stale data is exposed. Only once that's done can we
1167 * unlock and release the folio.
1168 */
1169 old_size = iter->inode->i_size;
1170 if (pos + written > old_size) {
1171 i_size_write(iter->inode, pos + written);
1172 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
1173 }
1174 __iomap_put_folio(iter, write_ops, written, folio);
1175
1176 if (old_size < pos)
1177 pagecache_isize_extended(iter->inode, old_size, pos);
1178
1179 cond_resched();
1180 if (unlikely(written == 0)) {
1181 /*
1182 * A short copy made iomap_write_end() reject the
1183 * thing entirely. Might be memory poisoning
1184 * halfway through, might be a race with munmap,
1185 * might be severe memory pressure.
1186 */
1187 iomap_write_failed(iter->inode, pos, bytes);
1188 iov_iter_revert(i, copied);
1189
1190 if (chunk > PAGE_SIZE)
1191 chunk /= 2;
1192 if (copied) {
1193 bytes = copied;
1194 goto retry;
1195 }
1196 } else {
1197 total_written += written;
1198 iomap_iter_advance(iter, written);
1199 }
1200 } while (iov_iter_count(i) && iomap_length(iter));
1201
1202 return total_written ? 0 : status;
1203 }
1204
1205 ssize_t
1206 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
1207 const struct iomap_ops *ops,
1208 const struct iomap_write_ops *write_ops, void *private)
1209 {
1210 struct iomap_iter iter = {
1211 .inode = iocb->ki_filp->f_mapping->host,
1212 .pos = iocb->ki_pos,
1213 .len = iov_iter_count(i),
1214 .flags = IOMAP_WRITE,
1215 .private = private,
1216 };
1217 ssize_t ret;
1218
1219 if (iocb->ki_flags & IOCB_NOWAIT)
1220 iter.flags |= IOMAP_NOWAIT;
1221 if (iocb->ki_flags & IOCB_DONTCACHE)
1222 iter.flags |= IOMAP_DONTCACHE;
1223
1224 while ((ret = iomap_iter(&iter, ops)) > 0)
1225 iter.status = iomap_write_iter(&iter, i, write_ops);
1226
1227 if (unlikely(iter.pos == iocb->ki_pos))
1228 return ret;
1229 ret = iter.pos - iocb->ki_pos;
1230 iocb->ki_pos = iter.pos;
1231 return ret;
1232 }
1233 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
1234
1235 static void iomap_write_delalloc_ifs_punch(struct inode *inode,
1236 struct folio *folio, loff_t start_byte, loff_t end_byte,
1237 struct iomap *iomap, iomap_punch_t punch)
1238 {
1239 unsigned int first_blk, last_blk;
1240 loff_t last_byte;
1241 u8 blkbits = inode->i_blkbits;
1242 struct iomap_folio_state *ifs;
1243
1244 /*
1245 * When we have per-block dirty tracking, there can be
1246 * blocks within a folio which are marked uptodate
1247 * but not dirty. In that case it is necessary to punch
1248 * out such blocks to avoid leaking any delalloc blocks.
1249 */
1250 ifs = folio->private;
1251 if (!ifs)
1252 return;
1253
1254 last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
1255 first_blk = offset_in_folio(folio, start_byte) >> blkbits;
1256 last_blk = offset_in_folio(folio, last_byte) >> blkbits;
1257 while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
1258 <= last_blk) {
1259 punch(inode, folio_pos(folio) + (first_blk << blkbits),
1260 1 << blkbits, iomap);
1261 first_blk++;
1262 }
1263 }
1264
1265 static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
1266 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1267 struct iomap *iomap, iomap_punch_t punch)
1268 {
1269 if (!folio_test_dirty(folio))
1270 return;
1271
1272 /* if dirty, punch up to offset */
1273 if (start_byte > *punch_start_byte) {
1274 punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
1275 iomap);
1276 }
1277
1278 /* Punch non-dirty blocks within folio */
1279 iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
1280 iomap, punch);
1281
1282 /*
1283 * Make sure the next punch start is correctly bound to
1284 * the end of this data range, not the end of the folio.
1285 */
1286 *punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
1287 }
1288
1289 /*
1290 * Scan the data range passed to us for dirty page cache folios. If we find a
1291 * dirty folio, punch out the preceding range and update the offset from which
1292 * the next punch will start from.
1293 *
1294 * We can punch out storage reservations under clean pages because they either
1295 * contain data that has been written back - in which case the delalloc punch
1296 * over that range is a no-op - or they were instantiated by read faults, in
1297 * which case they contain zeroes and we can remove the delalloc backing range;
1298 * any new writes to those pages will do the normal hole filling operation...
1299 *
1300 * This makes the logic simple: we only need to keep delalloc extents over the
1301 * dirty ranges of the page cache.
1302 *
1303 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1304 * simplify range iterations.
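 *
 * For example (illustrative numbers): scanning [0, 64k) where only the folio
 * at 16k-32k is dirty punches [0, 16k) when that folio is visited, punches
 * any clean blocks inside the dirty folio individually, and moves
 * punch_start_byte to 32k; the remaining range is punched out by
 * iomap_write_delalloc_release() once the scan completes.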
1305 */
1306 static void iomap_write_delalloc_scan(struct inode *inode,
1307 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1308 struct iomap *iomap, iomap_punch_t punch)
1309 {
1310 while (start_byte < end_byte) {
1311 struct folio *folio;
1312
1313 /* grab locked page */
1314 folio = filemap_lock_folio(inode->i_mapping,
1315 start_byte >> PAGE_SHIFT);
1316 if (IS_ERR(folio)) {
1317 start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
1318 PAGE_SIZE;
1319 continue;
1320 }
1321
1322 iomap_write_delalloc_punch(inode, folio, punch_start_byte,
1323 start_byte, end_byte, iomap, punch);
1324
1325 /* move offset to start of next folio in range */
1326 start_byte = folio_next_pos(folio);
1327 folio_unlock(folio);
1328 folio_put(folio);
1329 }
1330 }
1331
1332 /*
1333 * When a short write occurs, the filesystem might need to use ->iomap_end
1334 * to remove space reservations created in ->iomap_begin.
1335 *
1336 * For filesystems that use delayed allocation, there can be dirty pages over
1337 * the delalloc extent outside the range of a short write but still within the
1338 * delalloc extent allocated for this iomap if the write raced with page
1339 * faults.
1340 *
1341 * Punch out all the delalloc blocks in the range given except for those that
1342 * have dirty data still pending in the page cache - those are going to be
1343 * written and so must still retain the delalloc backing for writeback.
1344 *
1345 * The punch() callback *must* only punch delalloc extents in the range passed
1346 * to it. It must skip over all other types of extents in the range and leave
1347 * them completely unchanged. It must do this punch atomically with respect to
1348 * other extent modifications.
1349 *
1350 * The punch() callback may be called with a folio locked to prevent writeback
1351 * extent allocation racing at the edge of the range we are currently punching.
1352 * The locked folio may or may not cover the range being punched, so it is not
1353 * safe for the punch() callback to lock folios itself.
1354 *
1355 * Lock order is:
1356 *
1357 * inode->i_rwsem (shared or exclusive)
1358 * inode->i_mapping->invalidate_lock (exclusive)
1359 * folio_lock()
1360 * ->punch
1361 * internal filesystem allocation lock
1362 *
1363 * As we are scanning the page cache for data, we don't need to reimplement the
1364 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
1365 * start and end of data ranges correctly even for sub-folio block sizes. This
1366 * byte range based iteration is especially convenient because it means we
1367 * don't have to care about variable size folios, nor where the start or end of
1368 * the data range lies within a folio, if they lie within the same folio or even
1369 * if there are multiple discontiguous data ranges within the folio.
1370 *
1371 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
1372 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
1373 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
1374 * date. A write page fault can then mark it dirty. If we then fail a write()
1375 * beyond EOF into that up to date cached range, we allocate a delalloc block
1376 * beyond EOF and then have to punch it out. Because the range is up to date,
1377 * mapping_seek_hole_data() will return it, and we will skip the punch because
1378 * the folio is dirty. This is incorrect - we always need to punch out delalloc
1379 * beyond EOF in this case as writeback will never write back and convert that
1380 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
1381 * resulting in always punching out the range from the EOF to the end of the
1382 * range the iomap spans.
1383 *
1384 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
1385 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
1386 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
1387 * returns the end of the data range (data_end). Using closed intervals would
1388 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
1389 * the code to subtle off-by-one bugs....
1390 */
1391 void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
1392 loff_t end_byte, unsigned flags, struct iomap *iomap,
1393 iomap_punch_t punch)
1394 {
1395 loff_t punch_start_byte = start_byte;
1396 loff_t scan_end_byte = min(i_size_read(inode), end_byte);
1397
1398 /*
1399 * The caller must hold invalidate_lock to avoid races with page faults
1400 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
1401 * we walk the cache and perform delalloc extent removal. Failing to do
1402 * this can leave dirty pages with no space reservation in the cache.
1403 */
1404 lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);
1405
1406 while (start_byte < scan_end_byte) {
1407 loff_t data_end;
1408
1409 start_byte = mapping_seek_hole_data(inode->i_mapping,
1410 start_byte, scan_end_byte, SEEK_DATA);
1411 /*
1412 * If there is no more data to scan, all that is left is to
1413 * punch out the remaining range.
1414 *
1415 * Note that mapping_seek_hole_data is only supposed to return
1416 * either an offset or -ENXIO, so WARN on any other error as
1417 * that would be an API change without updating the callers.
1418 */
1419 if (start_byte == -ENXIO || start_byte == scan_end_byte)
1420 break;
1421 if (WARN_ON_ONCE(start_byte < 0))
1422 return;
1423 WARN_ON_ONCE(start_byte < punch_start_byte);
1424 WARN_ON_ONCE(start_byte > scan_end_byte);
1425
1426 /*
1427 * We find the end of this contiguous cached data range by
1428 * seeking from start_byte to the beginning of the next hole.
1429 */
1430 data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
1431 scan_end_byte, SEEK_HOLE);
1432 if (WARN_ON_ONCE(data_end < 0))
1433 return;
1434
1435 /*
1436 * If we race with post-direct I/O invalidation of the page cache,
1437 * there might be no data left at start_byte.
1438 */
1439 if (data_end == start_byte)
1440 continue;
1441
1442 WARN_ON_ONCE(data_end < start_byte);
1443 WARN_ON_ONCE(data_end > scan_end_byte);
1444
1445 iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
1446 data_end, iomap, punch);
1447
1448 /* The next data search starts at the end of this one. */
1449 start_byte = data_end;
1450 }
1451
1452 if (punch_start_byte < end_byte)
1453 punch(inode, punch_start_byte, end_byte - punch_start_byte,
1454 iomap);
1455 }
1456 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
1457
1458 static int iomap_unshare_iter(struct iomap_iter *iter,
1459 const struct iomap_write_ops *write_ops)
1460 {
1461 struct iomap *iomap = &iter->iomap;
1462 u64 bytes = iomap_length(iter);
1463 int status;
1464
1465 if (!iomap_want_unshare_iter(iter))
1466 return iomap_iter_advance(iter, bytes);
1467
1468 do {
1469 struct folio *folio;
1470 size_t offset;
1471 bool ret;
1472
1473 bytes = min_t(u64, SIZE_MAX, bytes);
1474 status = iomap_write_begin(iter, write_ops, &folio, &offset,
1475 &bytes);
1476 if (unlikely(status))
1477 return status;
1478 if (iomap->flags & IOMAP_F_STALE)
1479 break;
1480
1481 ret = iomap_write_end(iter, bytes, bytes, folio);
1482 __iomap_put_folio(iter, write_ops, bytes, folio);
1483 if (WARN_ON_ONCE(!ret))
1484 return -EIO;
1485
1486 cond_resched();
1487
1488 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1489
1490 status = iomap_iter_advance(iter, bytes);
1491 if (status)
1492 break;
1493 } while ((bytes = iomap_length(iter)) > 0);
1494
1495 return status;
1496 }
1497
1498 int
1499 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1500 const struct iomap_ops *ops,
1501 const struct iomap_write_ops *write_ops)
1502 {
1503 struct iomap_iter iter = {
1504 .inode = inode,
1505 .pos = pos,
1506 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
1507 };
1508 loff_t size = i_size_read(inode);
1509 int ret;
1510
1511 if (pos < 0 || pos >= size)
1512 return 0;
1513
1514 iter.len = min(len, size - pos);
1515 while ((ret = iomap_iter(&iter, ops)) > 0)
1516 iter.status = iomap_unshare_iter(&iter, write_ops);
1517 return ret;
1518 }
1519 EXPORT_SYMBOL_GPL(iomap_file_unshare);
1520
1521 /*
1522 * Flush the remaining range of the iter and mark the current mapping stale.
1523 * This is used when zero range sees an unwritten mapping that may have had
1524 * dirty pagecache over it.
1525 */
1526 static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
1527 {
1528 struct address_space *mapping = i->inode->i_mapping;
1529 loff_t end = i->pos + i->len - 1;
1530
1531 i->iomap.flags |= IOMAP_F_STALE;
1532 return filemap_write_and_wait_range(mapping, i->pos, end);
1533 }
1534
1535 static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
1536 const struct iomap_write_ops *write_ops)
1537 {
1538 u64 bytes = iomap_length(iter);
1539 int status;
1540
1541 do {
1542 struct folio *folio;
1543 size_t offset;
1544 bool ret;
1545
1546 bytes = min_t(u64, SIZE_MAX, bytes);
1547 status = iomap_write_begin(iter, write_ops, &folio, &offset,
1548 &bytes);
1549 if (status)
1550 return status;
1551 if (iter->iomap.flags & IOMAP_F_STALE)
1552 break;
1553
1554 /* a NULL folio means we're done with a folio batch */
1555 if (!folio) {
1556 status = iomap_iter_advance_full(iter);
1557 break;
1558 }
1559
1560 /* warn about zeroing folios beyond eof that won't write back */
1561 WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
1562
1563 trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
1564 bytes);
1565
1566 folio_zero_range(folio, offset, bytes);
1567 folio_mark_accessed(folio);
1568
1569 ret = iomap_write_end(iter, bytes, bytes, folio);
1570 __iomap_put_folio(iter, write_ops, bytes, folio);
1571 if (WARN_ON_ONCE(!ret))
1572 return -EIO;
1573
1574 status = iomap_iter_advance(iter, bytes);
1575 if (status)
1576 break;
1577 } while ((bytes = iomap_length(iter)) > 0);
1578
1579 if (did_zero)
1580 *did_zero = true;
1581 return status;
1582 }
1583
1584 /**
1585 * iomap_fill_dirty_folios - fill a folio batch with dirty folios
1586 * @iter: Iteration structure
1587 * @start: Start offset of range. Updated based on lookup progress.
1588 * @end: End offset of range
1589 * @iomap_flags: Flags to set on the associated iomap to track the batch.
1590 *
1591 * Returns the folio count directly. Also returns the associated control flag
1592 * if the batch lookup is performed, and the expected offset of a subsequent
1593 * lookup, via out params. The caller is responsible for setting the flag on
1594 * the associated iomap.
1595 */
1596 unsigned int
1597 iomap_fill_dirty_folios(
1598 struct iomap_iter *iter,
1599 loff_t *start,
1600 loff_t end,
1601 unsigned int *iomap_flags)
1602 {
1603 struct address_space *mapping = iter->inode->i_mapping;
1604 pgoff_t pstart = *start >> PAGE_SHIFT;
1605 pgoff_t pend = (end - 1) >> PAGE_SHIFT;
1606 unsigned int count;
1607
1608 if (!iter->fbatch) {
1609 *start = end;
1610 return 0;
1611 }
1612
1613 count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
1614 *start = (pstart << PAGE_SHIFT);
1615 *iomap_flags |= IOMAP_F_FOLIO_BATCH;
1616 return count;
1617 }
1618 EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
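/*
 * Usage sketch (hypothetical; one plausible caller): a filesystem's
 * ->iomap_begin for IOMAP_ZERO that maps an unwritten extent can batch up
 * the dirty folios over it so that iomap_zero_iter() only touches ranges
 * backed by dirty pagecache:
 *
 *	loff_t		batch_end = offset;
 *	unsigned int	flags = 0;
 *
 *	count = iomap_fill_dirty_folios(iter, &batch_end, end, &flags);
 *	iomap->flags |= flags;
 *
 * batch_end then tells the filesystem where a subsequent lookup would
 * continue, e.g. to trim the mapping it returns.
 */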
1619
1620 int
1621 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1622 const struct iomap_ops *ops,
1623 const struct iomap_write_ops *write_ops, void *private)
1624 {
1625 struct folio_batch fbatch;
1626 struct iomap_iter iter = {
1627 .inode = inode,
1628 .pos = pos,
1629 .len = len,
1630 .flags = IOMAP_ZERO,
1631 .private = private,
1632 .fbatch = &fbatch,
1633 };
1634 struct address_space *mapping = inode->i_mapping;
1635 int ret;
1636 bool range_dirty;
1637
1638 folio_batch_init(&fbatch);
1639
1640 /*
1641 * To avoid an unconditional flush, check pagecache state and only flush
1642 * if dirty and the fs returns a mapping that might convert on
1643 * writeback.
1644 */
1645 range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
1646 iter.pos + iter.len - 1);
1647 while ((ret = iomap_iter(&iter, ops)) > 0) {
1648 const struct iomap *srcmap = iomap_iter_srcmap(&iter);
1649
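		/*
		 * A dirty folio batch is only supported over unwritten
		 * mappings; anything else indicates a filesystem bug.
		 */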
1650 if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
1651 srcmap->type != IOMAP_UNWRITTEN))
1652 return -EIO;
1653
1654 if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
1655 (srcmap->type == IOMAP_HOLE ||
1656 srcmap->type == IOMAP_UNWRITTEN)) {
1657 s64 status;
1658
1659 if (range_dirty) {
1660 range_dirty = false;
1661 status = iomap_zero_iter_flush_and_stale(&iter);
1662 } else {
1663 status = iomap_iter_advance_full(&iter);
1664 }
1665 iter.status = status;
1666 continue;
1667 }
1668
1669 iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
1670 }
1671 return ret;
1672 }
1673 EXPORT_SYMBOL_GPL(iomap_zero_range);
1674
1675 int
1676 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1677 const struct iomap_ops *ops,
1678 const struct iomap_write_ops *write_ops, void *private)
1679 {
1680 unsigned int blocksize = i_blocksize(inode);
1681 unsigned int off = pos & (blocksize - 1);
1682
1683 /* Block boundary? Nothing to do */
1684 if (!off)
1685 return 0;
1686 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
1687 write_ops, private);
1688 }
1689 EXPORT_SYMBOL_GPL(iomap_truncate_page);
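/*
 * Usage sketch (foo_* names are hypothetical): a filesystem shrinking a file
 * would typically zero the tail of the new last block before updating i_size,
 * so that stale data is not exposed if the file is later extended:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&foo_iomap_ops, &foo_write_ops, NULL);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */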
1690
1691 static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1692 struct folio *folio)
1693 {
1694 loff_t length = iomap_length(iter);
1695 int ret;
1696
1697 if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1698 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1699 &iter->iomap);
1700 if (ret)
1701 return ret;
1702 block_commit_write(folio, 0, length);
1703 } else {
1704 WARN_ON_ONCE(!folio_test_uptodate(folio));
1705 folio_mark_dirty(folio);
1706 }
1707
1708 return iomap_iter_advance(iter, length);
1709 }
1710
1711 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
1712 void *private)
1713 {
1714 struct iomap_iter iter = {
1715 .inode = file_inode(vmf->vma->vm_file),
1716 .flags = IOMAP_WRITE | IOMAP_FAULT,
1717 .private = private,
1718 };
1719 struct folio *folio = page_folio(vmf->page);
1720 ssize_t ret;
1721
1722 folio_lock(folio);
1723 ret = folio_mkwrite_check_truncate(folio, iter.inode);
1724 if (ret < 0)
1725 goto out_unlock;
1726 iter.pos = folio_pos(folio);
1727 iter.len = ret;
1728 while ((ret = iomap_iter(&iter, ops)) > 0)
1729 iter.status = iomap_folio_mkwrite_iter(&iter, folio);
1730
1731 if (ret < 0)
1732 goto out_unlock;
1733 folio_wait_stable(folio);
1734 return VM_FAULT_LOCKED;
1735 out_unlock:
1736 folio_unlock(folio);
1737 return vmf_fs_error(ret);
1738 }
1739 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
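/*
 * Usage sketch (foo_* names are hypothetical): filesystems normally wrap this
 * in their vm_operations_struct ->page_mkwrite handler, bracketed by the
 * usual pagefault freeze protection:
 *
 *	static vm_fault_t foo_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		ret = iomap_page_mkwrite(vmf, &foo_buffered_write_iomap_ops,
 *				NULL);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */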
1740
1741 static void iomap_writeback_init(struct inode *inode, struct folio *folio)
1742 {
1743 struct iomap_folio_state *ifs = folio->private;
1744
1745 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1746 if (ifs) {
1747 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
1748 /*
1749 * Set this to the folio size. After processing the folio for
1750 * writeback in iomap_writeback_folio(), we'll subtract any
1751 * ranges not written back.
1752 *
1753 * We do this because otherwise, we would have to atomically
1754 * increment ifs->write_bytes_pending every time a range in the
1755 * folio needs to be written back.
1756 */
1757 atomic_set(&ifs->write_bytes_pending, folio_size(folio));
1758 }
1759 }
1760
1761 void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1762 size_t len)
1763 {
1764 struct iomap_folio_state *ifs = folio->private;
1765
1766 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1767 WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1768
1769 if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1770 folio_end_writeback(folio);
1771 }
1772 EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
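/*
 * Usage sketch: a filesystem that builds its own bios calls this once per
 * folio from its I/O completion path, e.g. (assuming a bio-based ioend):
 *
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio)
 *		iomap_finish_folio_write(inode, fi.folio, fi.length);
 */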
1773
1774 static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
1775 struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
1776 size_t *bytes_submitted)
1777 {
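	/*
	 * The dirty range may span multiple extents, so keep calling back
	 * into the filesystem until ->writeback_range has consumed all of
	 * rlen.  Each call returns the number of bytes it handled.
	 */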
1778 do {
1779 ssize_t ret;
1780
1781 ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
1782 if (WARN_ON_ONCE(ret == 0 || ret > rlen))
1783 return -EIO;
1784 if (ret < 0)
1785 return ret;
1786 rlen -= ret;
1787 pos += ret;
1788
1789 /*
1790 * Holes are not written back by ->writeback_range, so track
1791 * whether we handled anything that is not a hole here.
1792 */
1793 if (wpc->iomap.type != IOMAP_HOLE)
1794 *bytes_submitted += ret;
1795 } while (rlen);
1796
1797 return 0;
1798 }
1799
1800 /*
1801 * Check interaction of the folio with the file end.
1802 *
1803 * If the folio is entirely beyond i_size, return false. If it straddles
1804 * i_size, adjust end_pos and zero all data beyond i_size.
1805 */
1806 static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
1807 u64 *end_pos)
1808 {
1809 u64 isize = i_size_read(inode);
1810
1811 if (*end_pos > isize) {
1812 size_t poff = offset_in_folio(folio, isize);
1813 pgoff_t end_index = isize >> PAGE_SHIFT;
1814
1815 /*
1816 * If the folio is entirely outside of i_size, skip it.
1817 *
1818 * This can happen due to a truncate operation that is in
1819 * progress and in that case truncate will finish it off once
1820 * we've dropped the folio lock.
1821 *
1822 * Note that the pgoff_t used for end_index is an unsigned long.
1823 * If the given offset is greater than 16TB on a 32-bit system,
1824 * then if we checked if the folio is fully outside i_size with
1825 * "if (folio->index >= end_index + 1)", "end_index + 1" would
1826 * overflow and evaluate to 0. Hence this folio would be
1827 * redirtied and written out repeatedly, which would result in
1828 * an infinite loop; the user program performing this operation
1829 * would hang. Instead, we can detect this situation by
1830 * checking if the folio is totally beyond i_size or if its
1831 * offset is just equal to the EOF.
1832 */
1833 if (folio->index > end_index ||
1834 (folio->index == end_index && poff == 0))
1835 return false;
1836
1837 /*
1838 * The folio straddles i_size.
1839 *
1840 * It must be zeroed out on each and every writepage invocation
1841 * because it may be mmapped:
1842 *
1843 * A file is mapped in multiples of the page size. For a
1844 * file that is not a multiple of the page size, the
1845 * remaining memory is zeroed when mapped, and writes to that
1846 * region are not written out to the file.
1847 *
1848 * Also adjust the end_pos to the end of file and skip writeback
1849 * for all blocks entirely beyond i_size.
1850 */
1851 folio_zero_segment(folio, poff, folio_size(folio));
1852 *end_pos = isize;
1853 }
1854
1855 return true;
1856 }
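/*
 * Example (4096-byte pages, one block per page): with i_size == 6000, the
 * folio at index 1 covers [4096, 8192) and straddles EOF, so poff == 1904,
 * bytes [1904, 4096) of the folio are zeroed and end_pos is pulled back to
 * 6000.  A folio at index 2 starts entirely beyond i_size and is skipped.
 */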
1857
1858 int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
1859 {
1860 struct iomap_folio_state *ifs = folio->private;
1861 struct inode *inode = wpc->inode;
1862 u64 pos = folio_pos(folio);
1863 u64 end_pos = pos + folio_size(folio);
1864 u64 end_aligned = 0;
1865 loff_t orig_pos = pos;
1866 size_t bytes_submitted = 0;
1867 int error = 0;
1868 u32 rlen;
1869
1870 WARN_ON_ONCE(!folio_test_locked(folio));
1871 WARN_ON_ONCE(folio_test_dirty(folio));
1872 WARN_ON_ONCE(folio_test_writeback(folio));
1873
1874 trace_iomap_writeback_folio(inode, pos, folio_size(folio));
1875
1876 if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
1877 return 0;
1878 WARN_ON_ONCE(end_pos <= pos);
1879
1880 if (i_blocks_per_folio(inode, folio) > 1) {
1881 if (!ifs) {
1882 ifs = ifs_alloc(inode, folio, 0);
1883 iomap_set_range_dirty(folio, 0, end_pos - pos);
1884 }
1885
1886 iomap_writeback_init(inode, folio);
1887 }
1888
1889 /*
1890 * Set the writeback bit ASAP, as the I/O completion for the single
1891 * block per folio case can happen as soon as we're submitting the bio.
1892 */
1893 folio_start_writeback(folio);
1894
1895 /*
1896 * Walk through the folio to find dirty areas to write back.
1897 */
1898 end_aligned = round_up(end_pos, i_blocksize(inode));
1899 while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
1900 error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
1901 &bytes_submitted);
1902 if (error)
1903 break;
1904 pos += rlen;
1905 }
1906
1907 if (bytes_submitted)
1908 wpc->nr_folios++;
1909 if (error && pos > orig_pos)
1910 fserror_report_io(inode, FSERR_BUFFERED_WRITE, orig_pos, 0,
1911 error, GFP_NOFS);
1912
1913 /*
1914 * We can have dirty bits set past the end of file in the page_mkwrite path
1915 * while mapping the last partial folio. Hence it's better to clear
1916 * all the dirty bits in the folio here.
1917 */
1918 iomap_clear_range_dirty(folio, 0, folio_size(folio));
1919
1920 /*
1921 * Usually the writeback bit is cleared by the I/O completion handler.
1922 * But we may end up either not actually writing any blocks, or (when
1923 * there are multiple blocks in a folio) all I/O might have finished
1924 * already at this point. In that case we need to clear the writeback
1925 * bit ourselves right after unlocking the page.
1926 */
1927 if (ifs) {
1928 /*
1929 * Subtract any bytes that were initially accounted to
1930 * write_bytes_pending but skipped for writeback.
1931 */
1932 size_t bytes_not_submitted = folio_size(folio) -
1933 bytes_submitted;
1934
1935 if (bytes_not_submitted)
1936 iomap_finish_folio_write(inode, folio,
1937 bytes_not_submitted);
1938 } else if (!bytes_submitted) {
1939 folio_end_writeback(folio);
1940 }
1941
1942 mapping_set_error(inode->i_mapping, error);
1943 return error;
1944 }
1945 EXPORT_SYMBOL_GPL(iomap_writeback_folio);
1946
1947 int
1948 iomap_writepages(struct iomap_writepage_ctx *wpc)
1949 {
1950 struct address_space *mapping = wpc->inode->i_mapping;
1951 struct folio *folio = NULL;
1952 int error;
1953
1954 /*
1955 * Writeback from reclaim context should never happen except in the case
1956 * of a VM regression, so warn about it and refuse to write the data.
1957 */
1958 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
1959 PF_MEMALLOC))
1960 return -EIO;
1961
1962 while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
1963 error = iomap_writeback_folio(wpc, folio);
1964 folio_unlock(folio);
1965 }
1966
1967 /*
1968 * If @error is non-zero, it means that we have a situation where some
1969 * part of the submission process has failed after we've marked pages
1970 * for writeback.
1971 *
1972 * We cannot cancel the writeback directly in that case, so always call
1973 * ->writeback_submit to run the I/O completion handler to clear the
1974 * writeback bit and let the file system process the errors.
1975 */
1976 if (wpc->wb_ctx)
1977 return wpc->ops->writeback_submit(wpc, error);
1978 return error;
1979 }
1980 EXPORT_SYMBOL_GPL(iomap_writepages);
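/*
 * Usage sketch (hypothetical foo_writeback_ops supplying ->writeback_range
 * and ->writeback_submit): a filesystem's ->writepages sets up the context
 * and hands the whole walk to iomap:
 *
 *	static int foo_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = {
 *			.inode	= mapping->host,
 *			.wbc	= wbc,
 *			.ops	= &foo_writeback_ops,
 *		};
 *
 *		return iomap_writepages(&wpc);
 *	}
 */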
1981