1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (C) 2016-2023 Christoph Hellwig.
5 */
6 #include <linux/iomap.h>
7 #include <linux/buffer_head.h>
8 #include <linux/writeback.h>
9 #include <linux/swap.h>
10 #include <linux/migrate.h>
11 #include <linux/fserror.h>
12 #include "internal.h"
13 #include "trace.h"
14
15 #include "../internal.h"
16
17 /*
18 * Structure allocated for each folio to track per-block uptodate and dirty
19 * state as well as I/O completions.
20 */
21 struct iomap_folio_state {
22 spinlock_t state_lock;
23 unsigned int read_bytes_pending;
24 atomic_t write_bytes_pending;
25
26 /*
27 * Each block has two bits in this bitmap:
28 * Bits [0..blocks_per_folio) hold the uptodate status.
29 * Bits [blocks_per_folio..2*blocks_per_folio) hold the dirty status.
30 */
31 unsigned long state[];
32 };
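/*
 * Example layout (assuming a 16KiB folio over 4KiB filesystem blocks, i.e.
 * blocks_per_folio == 4): state[] spans 8 bits, where bits 0-3 carry the
 * per-block uptodate state and bits 4-7 carry the per-block dirty state.
 */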
33
34 static inline bool ifs_is_fully_uptodate(struct folio *folio,
35 struct iomap_folio_state *ifs)
36 {
37 struct inode *inode = folio->mapping->host;
38
39 return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
40 }
41
42 /*
43 * Find the next uptodate block in the folio. end_blk is inclusive.
44 * If no uptodate block is found, this will return end_blk + 1.
45 */
46 static unsigned ifs_next_uptodate_block(struct folio *folio,
47 unsigned start_blk, unsigned end_blk)
48 {
49 struct iomap_folio_state *ifs = folio->private;
50
51 return find_next_bit(ifs->state, end_blk + 1, start_blk);
52 }
53
54 /*
55 * Find the next non-uptodate block in the folio. end_blk is inclusive.
56 * If no non-uptodate block is found, this will return end_blk + 1.
57 */
58 static unsigned ifs_next_nonuptodate_block(struct folio *folio,
59 unsigned start_blk, unsigned end_blk)
60 {
61 struct iomap_folio_state *ifs = folio->private;
62
63 return find_next_zero_bit(ifs->state, end_blk + 1, start_blk);
64 }
65
66 static bool ifs_set_range_uptodate(struct folio *folio,
67 struct iomap_folio_state *ifs, size_t off, size_t len)
68 {
69 struct inode *inode = folio->mapping->host;
70 unsigned int first_blk = off >> inode->i_blkbits;
71 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
72 unsigned int nr_blks = last_blk - first_blk + 1;
73
74 bitmap_set(ifs->state, first_blk, nr_blks);
75 return ifs_is_fully_uptodate(folio, ifs);
76 }
77
78 static void iomap_set_range_uptodate(struct folio *folio, size_t off,
79 size_t len)
80 {
81 struct iomap_folio_state *ifs = folio->private;
82 unsigned long flags;
83 bool uptodate = true;
84
85 if (folio_test_uptodate(folio))
86 return;
87
88 if (ifs) {
89 spin_lock_irqsave(&ifs->state_lock, flags);
90 uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
91 spin_unlock_irqrestore(&ifs->state_lock, flags);
92 }
93
94 if (uptodate)
95 folio_mark_uptodate(folio);
96 }
97
98 /*
99 * Find the next dirty block in the folio. end_blk is inclusive.
100 * If no dirty block is found, this will return end_blk + 1.
101 */
102 static unsigned ifs_next_dirty_block(struct folio *folio,
103 unsigned start_blk, unsigned end_blk)
104 {
105 struct iomap_folio_state *ifs = folio->private;
106 struct inode *inode = folio->mapping->host;
107 unsigned int blks = i_blocks_per_folio(inode, folio);
108
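/* The dirty bits occupy the second half of the bitmap, hence the blks offset. */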
109 return find_next_bit(ifs->state, blks + end_blk + 1,
110 blks + start_blk) - blks;
111 }
112
113 /*
114 * Find the next clean block in the folio. end_blk is inclusive.
115 * If no clean block is found, this will return end_blk + 1.
116 */
117 static unsigned ifs_next_clean_block(struct folio *folio,
118 unsigned start_blk, unsigned end_blk)
119 {
120 struct iomap_folio_state *ifs = folio->private;
121 struct inode *inode = folio->mapping->host;
122 unsigned int blks = i_blocks_per_folio(inode, folio);
123
124 return find_next_zero_bit(ifs->state, blks + end_blk + 1,
125 blks + start_blk) - blks;
126 }
127
128 static unsigned ifs_find_dirty_range(struct folio *folio,
129 struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
130 {
131 struct inode *inode = folio->mapping->host;
132 unsigned start_blk =
133 offset_in_folio(folio, *range_start) >> inode->i_blkbits;
134 unsigned end_blk = min_not_zero(
135 offset_in_folio(folio, range_end) >> inode->i_blkbits,
136 i_blocks_per_folio(inode, folio)) - 1;
137 unsigned nblks;
138
139 start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
140 if (start_blk > end_blk)
141 return 0;
142 if (start_blk == end_blk)
143 nblks = 1;
144 else
145 nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
146 start_blk;
147
148 *range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
149 return nblks << inode->i_blkbits;
150 }
151
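/*
 * Find the next dirty byte range in the folio, starting the search at
 * *range_start and bounding it at range_end. On success *range_start is
 * updated to the first byte of the range and its length in bytes is
 * returned; 0 is returned if no dirty range is found. Without per-block
 * state the whole remaining range is treated as dirty.
 */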
152 static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
153 u64 range_end)
154 {
155 struct iomap_folio_state *ifs = folio->private;
156
157 if (*range_start >= range_end)
158 return 0;
159
160 if (ifs)
161 return ifs_find_dirty_range(folio, ifs, range_start, range_end);
162 return range_end - *range_start;
163 }
164
165 static void ifs_clear_range_dirty(struct folio *folio,
166 struct iomap_folio_state *ifs, size_t off, size_t len)
167 {
168 struct inode *inode = folio->mapping->host;
169 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
170 unsigned int first_blk = (off >> inode->i_blkbits);
171 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
172 unsigned int nr_blks = last_blk - first_blk + 1;
173 unsigned long flags;
174
175 spin_lock_irqsave(&ifs->state_lock, flags);
176 bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
177 spin_unlock_irqrestore(&ifs->state_lock, flags);
178 }
179
180 static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
181 {
182 struct iomap_folio_state *ifs = folio->private;
183
184 if (ifs)
185 ifs_clear_range_dirty(folio, ifs, off, len);
186 }
187
188 static void ifs_set_range_dirty(struct folio *folio,
189 struct iomap_folio_state *ifs, size_t off, size_t len)
190 {
191 struct inode *inode = folio->mapping->host;
192 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
193 unsigned int first_blk = (off >> inode->i_blkbits);
194 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
195 unsigned int nr_blks = last_blk - first_blk + 1;
196 unsigned long flags;
197
198 spin_lock_irqsave(&ifs->state_lock, flags);
199 bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
200 spin_unlock_irqrestore(&ifs->state_lock, flags);
201 }
202
203 static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
204 {
205 struct iomap_folio_state *ifs = folio->private;
206
207 if (ifs)
208 ifs_set_range_dirty(folio, ifs, off, len);
209 }
210
211 static struct iomap_folio_state *ifs_alloc(struct inode *inode,
212 struct folio *folio, unsigned int flags)
213 {
214 struct iomap_folio_state *ifs = folio->private;
215 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
216 gfp_t gfp;
217
218 if (ifs || nr_blocks <= 1)
219 return ifs;
220
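/*
 * Nonblocking callers get a best-effort allocation; all other callers
 * must not see an allocation failure here.
 */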
221 if (flags & IOMAP_NOWAIT)
222 gfp = GFP_NOWAIT;
223 else
224 gfp = GFP_NOFS | __GFP_NOFAIL;
225
226 /*
227 * ifs->state tracks two sets of state flags when the
228 * filesystem block size is smaller than the folio size.
229 * The first state tracks per-block uptodate and the
230 * second tracks per-block dirty state.
231 */
232 ifs = kzalloc_flex(*ifs, state, BITS_TO_LONGS(2 * nr_blocks), gfp);
233 if (!ifs)
234 return ifs;
235
236 spin_lock_init(&ifs->state_lock);
237 if (folio_test_uptodate(folio))
238 bitmap_set(ifs->state, 0, nr_blocks);
239 if (folio_test_dirty(folio))
240 bitmap_set(ifs->state, nr_blocks, nr_blocks);
241 folio_attach_private(folio, ifs);
242
243 return ifs;
244 }
245
246 static void ifs_free(struct folio *folio)
247 {
248 struct iomap_folio_state *ifs = folio_detach_private(folio);
249
250 if (!ifs)
251 return;
252 WARN_ON_ONCE(ifs->read_bytes_pending != 0);
253 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
254 WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
255 folio_test_uptodate(folio));
256 kfree(ifs);
257 }
258
259 /*
260 * Calculate how many bytes to truncate based on the number of blocks to
261 * truncate and the end position to start truncating from.
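 *
 * For example, with 4096-byte blocks (block_bits == 12), truncating 3 blocks
 * ending at end_pos == 0x2300 removes the 0x300 bytes of the partial final
 * block plus two full blocks: ((3 - 1) << 12) + 0x300 = 8960 bytes.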
262 */
263 static size_t iomap_bytes_to_truncate(loff_t end_pos, unsigned block_bits,
264 unsigned blocks_truncated)
265 {
266 unsigned block_size = 1 << block_bits;
267 unsigned block_offset = end_pos & (block_size - 1);
268
269 if (!block_offset)
270 return blocks_truncated << block_bits;
271
272 return ((blocks_truncated - 1) << block_bits) + block_offset;
273 }
274
275 /*
276 * Calculate the range inside the folio that we actually need to read.
277 */
278 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
279 loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
280 {
281 struct iomap_folio_state *ifs = folio->private;
282 loff_t orig_pos = *pos;
283 loff_t isize = i_size_read(inode);
284 unsigned block_bits = inode->i_blkbits;
285 unsigned block_size = (1 << block_bits);
286 size_t poff = offset_in_folio(folio, *pos);
287 size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
288 size_t orig_plen = plen;
289 unsigned first = poff >> block_bits;
290 unsigned last = (poff + plen - 1) >> block_bits;
291
292 /*
293 * If the block size is smaller than the page size, we need to check the
294 * per-block uptodate status and adjust the offset and length if needed
295 * to avoid reading in already uptodate ranges.
296 */
297 if (ifs) {
298 unsigned int next, blocks_skipped;
299
300 next = ifs_next_nonuptodate_block(folio, first, last);
301 blocks_skipped = next - first;
302
303 if (blocks_skipped) {
304 unsigned long block_offset = *pos & (block_size - 1);
305 unsigned bytes_skipped =
306 (blocks_skipped << block_bits) - block_offset;
307
308 *pos += bytes_skipped;
309 poff += bytes_skipped;
310 plen -= bytes_skipped;
311 }
312 first = next;
313
314 /* truncate len if we find any trailing uptodate block(s) */
315 if (++next <= last) {
316 next = ifs_next_uptodate_block(folio, next, last);
317 if (next <= last) {
318 plen -= iomap_bytes_to_truncate(*pos + plen,
319 block_bits, last - next + 1);
320 last = next - 1;
321 }
322 }
323 }
324
325 /*
326 * If the extent spans the block that contains the i_size, we need to
327 * handle both halves separately so that we properly zero data in the
328 * page cache for blocks that are entirely outside of i_size.
329 */
330 if (orig_pos <= isize && orig_pos + orig_plen > isize) {
331 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
332
333 if (first <= end && last > end)
334 plen -= iomap_bytes_to_truncate(*pos + plen, block_bits,
335 last - end);
336 }
337
338 *offp = poff;
339 *lenp = plen;
340 }
341
342 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
343 loff_t pos)
344 {
345 const struct iomap *srcmap = iomap_iter_srcmap(iter);
346
347 return srcmap->type != IOMAP_MAPPED ||
348 (srcmap->flags & IOMAP_F_NEW) ||
349 pos >= i_size_read(iter->inode);
350 }
351
352 /**
353 * iomap_read_inline_data - copy inline data into the page cache
354 * @iter: iteration structure
355 * @folio: folio to copy to
356 *
357 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
358 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
359 * Returns zero for success to complete the read, or the usual negative errno.
360 */
361 static int iomap_read_inline_data(const struct iomap_iter *iter,
362 struct folio *folio)
363 {
364 const struct iomap *iomap = iomap_iter_srcmap(iter);
365 size_t size = i_size_read(iter->inode) - iomap->offset;
366 size_t offset = offset_in_folio(folio, iomap->offset);
367
368 if (WARN_ON_ONCE(!iomap->inline_data))
369 return -EIO;
370
371 if (folio_test_uptodate(folio))
372 return 0;
373
374 if (WARN_ON_ONCE(size > iomap->length)) {
375 fserror_report_io(iter->inode, FSERR_BUFFERED_READ,
376 iomap->offset, size, -EIO, GFP_NOFS);
377 return -EIO;
378 }
379 if (offset > 0)
380 ifs_alloc(iter->inode, folio, iter->flags);
381
382 folio_fill_tail(folio, offset, iomap->inline_data, size);
383 iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
384 return 0;
385 }
386
387 void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
388 int error)
389 {
390 struct iomap_folio_state *ifs = folio->private;
391 bool uptodate = !error;
392 bool finished = true;
393
394 if (ifs) {
395 unsigned long flags;
396
397 spin_lock_irqsave(&ifs->state_lock, flags);
398 if (!error)
399 uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
400 ifs->read_bytes_pending -= len;
401 finished = !ifs->read_bytes_pending;
402 spin_unlock_irqrestore(&ifs->state_lock, flags);
403 }
404
405 if (error)
406 fserror_report_io(folio->mapping->host, FSERR_BUFFERED_READ,
407 folio_pos(folio) + off, len, error,
408 GFP_ATOMIC);
409
410 if (finished)
411 folio_end_read(folio, uptodate);
412 }
413 EXPORT_SYMBOL_GPL(iomap_finish_folio_read);
414
415 static void iomap_read_init(struct folio *folio)
416 {
417 struct iomap_folio_state *ifs = folio->private;
418
419 if (ifs) {
420 /*
421 * ifs->read_bytes_pending is used to track how many bytes are
422 * read in asynchronously by the IO helper. We need to track
423 * this so that we can know when the IO helper has finished
424 * reading in all the necessary ranges of the folio and can end
425 * the read.
426 *
427 * Increase ->read_bytes_pending by the folio size to start.
428 * We'll subtract any uptodate / zeroed ranges that did not
429 * require IO in iomap_read_end() after we're done processing
430 * the folio.
431 *
432 * We do this because otherwise, we would have to increment
433 * ifs->read_bytes_pending every time a range in the folio needs
434 * to be read in, which can get expensive since the spinlock
435 * needs to be held whenever modifying ifs->read_bytes_pending.
436 */
437 spin_lock_irq(&ifs->state_lock);
438 WARN_ON_ONCE(ifs->read_bytes_pending != 0);
439 ifs->read_bytes_pending = folio_size(folio);
440 spin_unlock_irq(&ifs->state_lock);
441 }
442 }
443
444 /*
445 * This ends IO if no bytes were submitted to an IO helper.
446 *
447 * Otherwise, this calibrates ifs->read_bytes_pending to represent only the
448 * submitted bytes (see comment in iomap_read_init()). If all bytes submitted
449 * have already been completed by the IO helper, then this will end the read.
450 * Else the IO helper will end the read after all submitted ranges have been
451 * read.
452 */
453 static void iomap_read_end(struct folio *folio, size_t bytes_submitted)
454 {
455 struct iomap_folio_state *ifs = folio->private;
456
457 if (ifs) {
458 bool end_read, uptodate;
459
460 spin_lock_irq(&ifs->state_lock);
461 if (!ifs->read_bytes_pending) {
462 WARN_ON_ONCE(bytes_submitted);
463 spin_unlock_irq(&ifs->state_lock);
464 folio_unlock(folio);
465 return;
466 }
467
468 /*
469 * Subtract any bytes that were initially accounted to
470 * read_bytes_pending but skipped for IO.
471 */
472 ifs->read_bytes_pending -= folio_size(folio) - bytes_submitted;
473
474 /*
475 * If !ifs->read_bytes_pending, this means all pending reads by
476 * the IO helper have already completed, which means we need to
477 * end the folio read here. If ifs->read_bytes_pending != 0,
478 * the IO helper will end the folio read.
479 */
480 end_read = !ifs->read_bytes_pending;
481 if (end_read)
482 uptodate = ifs_is_fully_uptodate(folio, ifs);
483 spin_unlock_irq(&ifs->state_lock);
484 if (end_read)
485 folio_end_read(folio, uptodate);
486 } else {
487 /*
488 * If a folio without an ifs is submitted to the IO helper, the
489 * read must be on the entire folio and the IO helper takes
490 * ownership of the folio. This means we should only enter
491 * iomap_read_end() for the !ifs case if no bytes were submitted
492 * to the IO helper, in which case we are responsible for
493 * unlocking the folio here.
494 */
495 WARN_ON_ONCE(bytes_submitted);
496 folio_unlock(folio);
497 }
498 }
499
500 static int iomap_read_folio_iter(struct iomap_iter *iter,
501 struct iomap_read_folio_ctx *ctx, size_t *bytes_submitted)
502 {
503 const struct iomap *iomap = &iter->iomap;
504 loff_t pos = iter->pos;
505 loff_t length = iomap_length(iter);
506 struct folio *folio = ctx->cur_folio;
507 size_t folio_len = folio_size(folio);
508 size_t poff, plen;
509 loff_t pos_diff;
510 int ret;
511
512 if (iomap->type == IOMAP_INLINE) {
513 ret = iomap_read_inline_data(iter, folio);
514 if (ret)
515 return ret;
516 return iomap_iter_advance(iter, length);
517 }
518
519 ifs_alloc(iter->inode, folio, iter->flags);
520
521 length = min_t(loff_t, length, folio_len - offset_in_folio(folio, pos));
522 while (length) {
523 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff,
524 &plen);
525
526 pos_diff = pos - iter->pos;
527 if (WARN_ON_ONCE(pos_diff + plen > length))
528 return -EIO;
529
530 ret = iomap_iter_advance(iter, pos_diff);
531 if (ret)
532 return ret;
533
534 if (plen == 0)
535 return 0;
536
537 /* zero post-eof blocks as the page may be mapped */
538 if (iomap_block_needs_zeroing(iter, pos)) {
539 folio_zero_range(folio, poff, plen);
540 iomap_set_range_uptodate(folio, poff, plen);
541 } else {
542 if (!*bytes_submitted)
543 iomap_read_init(folio);
544 ret = ctx->ops->read_folio_range(iter, ctx, plen);
545 if (ret < 0)
546 fserror_report_io(iter->inode,
547 FSERR_BUFFERED_READ, pos,
548 plen, ret, GFP_NOFS);
549 if (ret)
550 return ret;
551
552 *bytes_submitted += plen;
553 /*
554 * If the entire folio has been read in by the IO
555 * helper, then the helper owns the folio and will end
556 * the read on it.
557 */
558 if (*bytes_submitted == folio_len)
559 ctx->cur_folio = NULL;
560 }
561
562 ret = iomap_iter_advance(iter, plen);
563 if (ret)
564 return ret;
565 length -= pos_diff + plen;
566 pos = iter->pos;
567 }
568 return 0;
569 }
570
571 void iomap_read_folio(const struct iomap_ops *ops,
572 struct iomap_read_folio_ctx *ctx, void *private)
573 {
574 struct folio *folio = ctx->cur_folio;
575 struct iomap_iter iter = {
576 .inode = folio->mapping->host,
577 .pos = folio_pos(folio),
578 .len = folio_size(folio),
579 .private = private,
580 };
581 size_t bytes_submitted = 0;
582 int ret;
583
584 trace_iomap_readpage(iter.inode, 1);
585
586 while ((ret = iomap_iter(&iter, ops)) > 0)
587 iter.status = iomap_read_folio_iter(&iter, ctx,
588 &bytes_submitted);
589
590 if (ctx->ops->submit_read)
591 ctx->ops->submit_read(ctx);
592
593 if (ctx->cur_folio)
594 iomap_read_end(ctx->cur_folio, bytes_submitted);
595 }
596 EXPORT_SYMBOL_GPL(iomap_read_folio);
597
598 static int iomap_readahead_iter(struct iomap_iter *iter,
599 struct iomap_read_folio_ctx *ctx, size_t *cur_bytes_submitted)
600 {
601 int ret;
602
603 while (iomap_length(iter)) {
604 if (ctx->cur_folio &&
605 offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
606 iomap_read_end(ctx->cur_folio, *cur_bytes_submitted);
607 ctx->cur_folio = NULL;
608 }
609 if (!ctx->cur_folio) {
610 ctx->cur_folio = readahead_folio(ctx->rac);
611 if (WARN_ON_ONCE(!ctx->cur_folio))
612 return -EINVAL;
613 *cur_bytes_submitted = 0;
614 }
615 ret = iomap_read_folio_iter(iter, ctx, cur_bytes_submitted);
616 if (ret)
617 return ret;
618 }
619
620 return 0;
621 }
622
623 /**
624 * iomap_readahead - Attempt to read pages from a file.
625 * @ops: The operations vector for the filesystem.
626 * @ctx: The ctx used for issuing readahead.
627 * @private: The filesystem-specific information for issuing iomap_iter.
628 *
629 * This function is for filesystems to call to implement their readahead
630 * address_space operation.
631 *
632 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
633 * blocks from disc), and may wait for it. The caller may be trying to
634 * access a different page, and so sleeping excessively should be avoided.
635 * It may allocate memory, but should avoid costly allocations. This
636 * function is called with memalloc_nofs set, so allocations will not cause
637 * the filesystem to be reentered.
638 */
639 void iomap_readahead(const struct iomap_ops *ops,
640 struct iomap_read_folio_ctx *ctx, void *private)
641 {
642 struct readahead_control *rac = ctx->rac;
643 struct iomap_iter iter = {
644 .inode = rac->mapping->host,
645 .pos = readahead_pos(rac),
646 .len = readahead_length(rac),
647 .private = private,
648 };
649 size_t cur_bytes_submitted = 0;
650
651 trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
652
653 while (iomap_iter(&iter, ops) > 0)
654 iter.status = iomap_readahead_iter(&iter, ctx,
655 &cur_bytes_submitted);
656
657 if (ctx->ops->submit_read)
658 ctx->ops->submit_read(ctx);
659
660 if (ctx->cur_folio)
661 iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
662 }
663 EXPORT_SYMBOL_GPL(iomap_readahead);
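/*
 * A minimal caller sketch (hypothetical filesystem "foofs"; the foofs_* names
 * and ops tables are illustrative, not part of this API):
 *
 *	static void foofs_readahead(struct readahead_control *rac)
 *	{
 *		struct iomap_read_folio_ctx ctx = {
 *			.ops	= &foofs_read_folio_ops,
 *			.rac	= rac,
 *		};
 *
 *		iomap_readahead(&foofs_iomap_ops, &ctx, NULL);
 *	}
 */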
664
665 /*
666 * iomap_is_partially_uptodate checks whether blocks within a folio are
667 * uptodate or not.
668 *
669 * Returns true if all blocks which correspond to the specified part
670 * of the folio are uptodate.
671 */
672 bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
673 {
674 struct iomap_folio_state *ifs = folio->private;
675 struct inode *inode = folio->mapping->host;
676 unsigned first, last;
677
678 if (!ifs)
679 return false;
680
681 /* Caller's range may extend past the end of this folio */
682 count = min(folio_size(folio) - from, count);
683
684 /* First and last blocks in range within folio */
685 first = from >> inode->i_blkbits;
686 last = (from + count - 1) >> inode->i_blkbits;
687
688 return ifs_next_nonuptodate_block(folio, first, last) > last;
689 }
690 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
691
692 /**
693 * iomap_get_folio - get a folio reference for writing
694 * @iter: iteration structure
695 * @pos: start offset of write
696 * @len: Suggested size of folio to create.
697 *
698 * Returns a locked reference to the folio at @pos, or an error pointer if the
699 * folio could not be obtained.
700 */
701 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
702 {
703 fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
704
705 if (iter->flags & IOMAP_NOWAIT)
706 fgp |= FGP_NOWAIT;
707 if (iter->flags & IOMAP_DONTCACHE)
708 fgp |= FGP_DONTCACHE;
709 fgp |= fgf_set_order(len);
710
711 return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
712 fgp, mapping_gfp_mask(iter->inode->i_mapping));
713 }
714 EXPORT_SYMBOL_GPL(iomap_get_folio);
715
716 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
717 {
718 trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
719 folio_size(folio));
720
721 /*
722 * If the folio is dirty, we refuse to release our metadata because
723 * it may be partially dirty. Once we track per-block dirty state,
724 * we can release the metadata if every block is dirty.
725 */
726 if (folio_test_dirty(folio))
727 return false;
728 ifs_free(folio);
729 return true;
730 }
731 EXPORT_SYMBOL_GPL(iomap_release_folio);
732
733 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
734 {
735 trace_iomap_invalidate_folio(folio->mapping->host,
736 folio_pos(folio) + offset, len);
737
738 /*
739 * If we're invalidating the entire folio, clear the dirty state
740 * from it and release it to avoid unnecessary buildup of the LRU.
741 */
742 if (offset == 0 && len == folio_size(folio)) {
743 WARN_ON_ONCE(folio_test_writeback(folio));
744 folio_cancel_dirty(folio);
745 ifs_free(folio);
746 }
747 }
748 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
749
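/*
 * Mark the entire folio dirty, including the per-block dirty bits when
 * per-block state is (or can be) attached to the folio.
 */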
750 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
751 {
752 struct inode *inode = mapping->host;
753 size_t len = folio_size(folio);
754
755 ifs_alloc(inode, folio, 0);
756 iomap_set_range_dirty(folio, 0, len);
757 return filemap_dirty_folio(mapping, folio);
758 }
759 EXPORT_SYMBOL_GPL(iomap_dirty_folio);
760
761 static void
762 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
763 {
764 loff_t i_size = i_size_read(inode);
765
766 /*
767 * Only truncate newly allocated pages beyond EOF, even if the
768 * write started inside the existing inode size.
769 */
770 if (pos + len > i_size)
771 truncate_pagecache_range(inode, max(pos, i_size),
772 pos + len - 1);
773 }
774
775 static int __iomap_write_begin(const struct iomap_iter *iter,
776 const struct iomap_write_ops *write_ops, size_t len,
777 struct folio *folio)
778 {
779 struct iomap_folio_state *ifs;
780 loff_t pos = iter->pos;
781 loff_t block_size = i_blocksize(iter->inode);
782 loff_t block_start = round_down(pos, block_size);
783 loff_t block_end = round_up(pos + len, block_size);
784 unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
785 size_t from = offset_in_folio(folio, pos), to = from + len;
786 size_t poff, plen;
787
788 /*
789 * If the write or zeroing completely overlaps the current folio, then the
790 * entire folio will be dirtied so there is no need for
791 * per-block state tracking structures to be attached to this folio.
792 * For the unshare case, we must read in the ondisk contents because we
793 * are not changing pagecache contents.
794 */
795 if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
796 pos + len >= folio_next_pos(folio))
797 return 0;
798
799 ifs = ifs_alloc(iter->inode, folio, iter->flags);
800 if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
801 return -EAGAIN;
802
803 if (folio_test_uptodate(folio))
804 return 0;
805
806 do {
807 iomap_adjust_read_range(iter->inode, folio, &block_start,
808 block_end - block_start, &poff, &plen);
809 if (plen == 0)
810 break;
811
812 /*
813 * If the read range will be entirely overwritten by the write,
814 * we can skip having to zero/read it in.
815 */
816 if (!(iter->flags & IOMAP_UNSHARE) && from <= poff &&
817 to >= poff + plen)
818 continue;
819
820 if (iomap_block_needs_zeroing(iter, block_start)) {
821 if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
822 return -EIO;
823 folio_zero_segments(folio, poff, from, to, poff + plen);
824 } else {
825 int status;
826
827 if (iter->flags & IOMAP_NOWAIT)
828 return -EAGAIN;
829
830 if (write_ops && write_ops->read_folio_range)
831 status = write_ops->read_folio_range(iter,
832 folio, block_start, plen);
833 else
834 status = iomap_bio_read_folio_range_sync(iter,
835 folio, block_start, plen);
836 if (status < 0)
837 fserror_report_io(iter->inode,
838 FSERR_BUFFERED_READ, pos,
839 len, status, GFP_NOFS);
840 if (status)
841 return status;
842 }
843 iomap_set_range_uptodate(folio, poff, plen);
844 } while ((block_start += plen) < block_end);
845
846 return 0;
847 }
848
849 static struct folio *__iomap_get_folio(struct iomap_iter *iter,
850 const struct iomap_write_ops *write_ops, size_t len)
851 {
852 loff_t pos = iter->pos;
853
854 if (!mapping_large_folio_support(iter->inode->i_mapping))
855 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
856
857 if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
858 struct folio *folio = folio_batch_next(iter->fbatch);
859
860 if (!folio)
861 return NULL;
862
863 /*
864 * The folio mapping generally shouldn't have changed based on
865 * fs locks, but be consistent with filemap lookup and retry
866 * the iter if it does.
867 */
868 folio_lock(folio);
869 if (unlikely(folio->mapping != iter->inode->i_mapping)) {
870 iter->iomap.flags |= IOMAP_F_STALE;
871 folio_unlock(folio);
872 return NULL;
873 }
874
875 folio_get(folio);
876 folio_wait_stable(folio);
877 return folio;
878 }
879
880 if (write_ops && write_ops->get_folio)
881 return write_ops->get_folio(iter, pos, len);
882 return iomap_get_folio(iter, pos, len);
883 }
884
885 static void __iomap_put_folio(struct iomap_iter *iter,
886 const struct iomap_write_ops *write_ops, size_t ret,
887 struct folio *folio)
888 {
889 loff_t pos = iter->pos;
890
891 if (write_ops && write_ops->put_folio) {
892 write_ops->put_folio(iter->inode, pos, ret, folio);
893 } else {
894 folio_unlock(folio);
895 folio_put(folio);
896 }
897 }
898
899 /* trim pos and bytes to within a given folio */
900 static loff_t iomap_trim_folio_range(struct iomap_iter *iter,
901 struct folio *folio, size_t *offset, u64 *bytes)
902 {
903 loff_t pos = iter->pos;
904 size_t fsize = folio_size(folio);
905
906 WARN_ON_ONCE(pos < folio_pos(folio));
907 WARN_ON_ONCE(pos >= folio_pos(folio) + fsize);
908
909 *offset = offset_in_folio(folio, pos);
910 *bytes = min(*bytes, fsize - *offset);
911
912 return pos;
913 }
914
915 static int iomap_write_begin_inline(const struct iomap_iter *iter,
916 struct folio *folio)
917 {
918 /* needs more work for the tailpacking case; disable for now */
919 if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
920 return -EIO;
921 return iomap_read_inline_data(iter, folio);
922 }
923
924 /*
925 * Grab and prepare a folio for write based on iter state. Returns the folio,
926 * offset, and length. Callers can optionally pass a max length *plen,
927 * otherwise init to zero.
928 */
929 static int iomap_write_begin(struct iomap_iter *iter,
930 const struct iomap_write_ops *write_ops, struct folio **foliop,
931 size_t *poffset, u64 *plen)
932 {
933 const struct iomap *srcmap = iomap_iter_srcmap(iter);
934 loff_t pos;
935 u64 len = min_t(u64, SIZE_MAX, iomap_length(iter));
936 struct folio *folio;
937 int status = 0;
938
939 len = min_not_zero(len, *plen);
940 *foliop = NULL;
941 *plen = 0;
942
943 if (fatal_signal_pending(current))
944 return -EINTR;
945
946 folio = __iomap_get_folio(iter, write_ops, len);
947 if (IS_ERR(folio))
948 return PTR_ERR(folio);
949
950 /*
951 * No folio means we're done with a batch. We still have range to
952 * process so return and let the caller iterate and refill the batch.
953 */
954 if (!folio) {
955 WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
956 return 0;
957 }
958
959 /*
960 * Now we have a locked folio, before we do anything with it we need to
961 * check that the iomap we have cached is not stale. The inode extent
962 * mapping can change due to concurrent IO in flight (e.g.
963 * IOMAP_UNWRITTEN state can change and memory reclaim could have
964 * reclaimed a previously partially written page at this index after IO
965 * completion before this write reaches this file offset) and hence we
966 * could do the wrong thing here (zero a page range incorrectly or fail
967 * to zero) and corrupt data.
968 */
969 if (write_ops && write_ops->iomap_valid) {
970 bool iomap_valid = write_ops->iomap_valid(iter->inode,
971 &iter->iomap);
972 if (!iomap_valid) {
973 iter->iomap.flags |= IOMAP_F_STALE;
974 status = 0;
975 goto out_unlock;
976 }
977 }
978
979 /*
980 * The folios in a batch may not be contiguous. If we've skipped
981 * forward, advance the iter to the pos of the current folio. If the
982 * folio starts beyond the end of the mapping, it may have been trimmed
983 * since the lookup for whatever reason. Return a NULL folio to
984 * terminate the op.
985 */
986 if (folio_pos(folio) > iter->pos) {
987 len = min_t(u64, folio_pos(folio) - iter->pos,
988 iomap_length(iter));
989 status = iomap_iter_advance(iter, len);
990 len = iomap_length(iter);
991 if (status || !len)
992 goto out_unlock;
993 }
994
995 pos = iomap_trim_folio_range(iter, folio, poffset, &len);
996
997 if (srcmap->type == IOMAP_INLINE)
998 status = iomap_write_begin_inline(iter, folio);
999 else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
1000 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
1001 else
1002 status = __iomap_write_begin(iter, write_ops, len, folio);
1003
1004 if (unlikely(status))
1005 goto out_unlock;
1006
1007 *foliop = folio;
1008 *plen = len;
1009 return 0;
1010
1011 out_unlock:
1012 __iomap_put_folio(iter, write_ops, 0, folio);
1013 return status;
1014 }
1015
1016 static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
1017 size_t copied, struct folio *folio)
1018 {
1019 flush_dcache_folio(folio);
1020
1021 /*
1022 * The blocks that were entirely written will now be uptodate, so we
1023 * don't have to worry about a read_folio reading them and overwriting a
1024 * partial write. However, if we've encountered a short write and only
1025 * partially written into a block, it will not be marked uptodate, so a
1026 * read_folio might come in and destroy our partial write.
1027 *
1028 * Do the simplest thing and just treat any short write to a
1029 * non-uptodate page as a zero-length write, and force the caller to
1030 * redo the whole thing.
1031 */
1032 if (unlikely(copied < len && !folio_test_uptodate(folio)))
1033 return false;
1034 iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
1035 iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
1036 filemap_dirty_folio(inode->i_mapping, folio);
1037 return true;
1038 }
1039
1040 static bool iomap_write_end_inline(const struct iomap_iter *iter,
1041 struct folio *folio, loff_t pos, size_t copied)
1042 {
1043 const struct iomap *iomap = &iter->iomap;
1044 void *addr;
1045
1046 WARN_ON_ONCE(!folio_test_uptodate(folio));
1047 BUG_ON(!iomap_inline_data_valid(iomap));
1048
1049 if (WARN_ON_ONCE(!iomap->inline_data))
1050 return false;
1051
1052 flush_dcache_folio(folio);
1053 addr = kmap_local_folio(folio, pos);
1054 memcpy(iomap_inline_data(iomap, pos), addr, copied);
1055 kunmap_local(addr);
1056
1057 mark_inode_dirty(iter->inode);
1058 return true;
1059 }
1060
1061 /*
1062 * Returns true if all copied bytes have been written to the pagecache,
1063 * otherwise return false.
1064 */
1065 static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
1066 struct folio *folio)
1067 {
1068 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1069 loff_t pos = iter->pos;
1070
1071 if (srcmap->type == IOMAP_INLINE)
1072 return iomap_write_end_inline(iter, folio, pos, copied);
1073
1074 if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
1075 size_t bh_written;
1076
1077 bh_written = block_write_end(pos, len, copied, folio);
1078 WARN_ON_ONCE(bh_written != copied && bh_written != 0);
1079 return bh_written == copied;
1080 }
1081
1082 return __iomap_write_end(iter->inode, pos, len, copied, folio);
1083 }
1084
1085 static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
1086 const struct iomap_write_ops *write_ops)
1087 {
1088 ssize_t total_written = 0;
1089 int status = 0;
1090 struct address_space *mapping = iter->inode->i_mapping;
1091 size_t chunk = mapping_max_folio_size(mapping);
1092 unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
1093
1094 do {
1095 struct folio *folio;
1096 loff_t old_size;
1097 size_t offset; /* Offset into folio */
1098 u64 bytes; /* Bytes to write to folio */
1099 size_t copied; /* Bytes copied from user */
1100 u64 written; /* Bytes have been written */
1101 loff_t pos;
1102
1103 bytes = iov_iter_count(i);
1104 retry:
1105 offset = iter->pos & (chunk - 1);
1106 bytes = min(chunk - offset, bytes);
1107 status = balance_dirty_pages_ratelimited_flags(mapping,
1108 bdp_flags);
1109 if (unlikely(status))
1110 break;
1111
1112 if (bytes > iomap_length(iter))
1113 bytes = iomap_length(iter);
1114
1115 /*
1116 * Bring in the user page that we'll copy from _first_.
1117 * Otherwise there's a nasty deadlock on copying from the
1118 * same page as we're writing to, without it being marked
1119 * up-to-date.
1120 *
1121 * For async buffered writes the assumption is that the user
1122 * page has already been faulted in. This can be optimized by
1123 * faulting the user page.
1124 */
1125 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
1126 status = -EFAULT;
1127 break;
1128 }
1129
1130 status = iomap_write_begin(iter, write_ops, &folio, &offset,
1131 &bytes);
1132 if (unlikely(status)) {
1133 iomap_write_failed(iter->inode, iter->pos, bytes);
1134 break;
1135 }
1136 if (iter->iomap.flags & IOMAP_F_STALE)
1137 break;
1138
1139 pos = iter->pos;
1140
1141 if (mapping_writably_mapped(mapping))
1142 flush_dcache_folio(folio);
1143
1144 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
1145 written = iomap_write_end(iter, bytes, copied, folio) ?
1146 copied : 0;
1147
1148 /*
1149 * Update the in-memory inode size after copying the data into
1150 * the page cache. It's up to the file system to write the
1151 * updated size to disk, preferably after I/O completion so that
1152 * no stale data is exposed. Only once that's done can we
1153 * unlock and release the folio.
1154 */
1155 old_size = iter->inode->i_size;
1156 if (pos + written > old_size) {
1157 i_size_write(iter->inode, pos + written);
1158 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
1159 }
1160 __iomap_put_folio(iter, write_ops, written, folio);
1161
1162 if (old_size < pos)
1163 pagecache_isize_extended(iter->inode, old_size, pos);
1164
1165 cond_resched();
1166 if (unlikely(written == 0)) {
1167 /*
1168 * A short copy made iomap_write_end() reject the
1169 * thing entirely. Might be memory poisoning
1170 * halfway through, might be a race with munmap,
1171 * might be severe memory pressure.
1172 */
1173 iomap_write_failed(iter->inode, pos, bytes);
1174 iov_iter_revert(i, copied);
1175
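/*
 * Shrink the copy granularity (down to one page) and retry with
 * however much we did manage to copy.
 */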
1176 if (chunk > PAGE_SIZE)
1177 chunk /= 2;
1178 if (copied) {
1179 bytes = copied;
1180 goto retry;
1181 }
1182 } else {
1183 total_written += written;
1184 iomap_iter_advance(iter, written);
1185 }
1186 } while (iov_iter_count(i) && iomap_length(iter));
1187
1188 return total_written ? 0 : status;
1189 }
1190
1191 ssize_t
1192 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
1193 const struct iomap_ops *ops,
1194 const struct iomap_write_ops *write_ops, void *private)
1195 {
1196 struct iomap_iter iter = {
1197 .inode = iocb->ki_filp->f_mapping->host,
1198 .pos = iocb->ki_pos,
1199 .len = iov_iter_count(i),
1200 .flags = IOMAP_WRITE,
1201 .private = private,
1202 };
1203 ssize_t ret;
1204
1205 if (iocb->ki_flags & IOCB_NOWAIT)
1206 iter.flags |= IOMAP_NOWAIT;
1207 if (iocb->ki_flags & IOCB_DONTCACHE)
1208 iter.flags |= IOMAP_DONTCACHE;
1209
1210 while ((ret = iomap_iter(&iter, ops)) > 0)
1211 iter.status = iomap_write_iter(&iter, i, write_ops);
1212
1213 if (unlikely(iter.pos == iocb->ki_pos))
1214 return ret;
1215 ret = iter.pos - iocb->ki_pos;
1216 iocb->ki_pos = iter.pos;
1217 return ret;
1218 }
1219 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
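/*
 * A minimal caller sketch (hypothetical filesystem "foofs"; all foofs_* names
 * are illustrative only):
 *
 *	static ssize_t foofs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&foofs_iomap_ops, NULL, NULL);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */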
1220
1221 static void iomap_write_delalloc_ifs_punch(struct inode *inode,
1222 struct folio *folio, loff_t start_byte, loff_t end_byte,
1223 struct iomap *iomap, iomap_punch_t punch)
1224 {
1225 unsigned int first_blk, last_blk;
1226 loff_t last_byte;
1227 u8 blkbits = inode->i_blkbits;
1228 struct iomap_folio_state *ifs;
1229
1230 /*
1231 * When we have per-block dirty tracking, there can be
1232 * blocks within a folio which are marked uptodate
1233 * but not dirty. In that case it is necessary to punch
1234 * out such blocks to avoid leaking any delalloc blocks.
1235 */
1236 ifs = folio->private;
1237 if (!ifs)
1238 return;
1239
1240 last_byte = min_t(loff_t, end_byte - 1, folio_next_pos(folio) - 1);
1241 first_blk = offset_in_folio(folio, start_byte) >> blkbits;
1242 last_blk = offset_in_folio(folio, last_byte) >> blkbits;
1243 while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
1244 <= last_blk) {
1245 punch(inode, folio_pos(folio) + (first_blk << blkbits),
1246 1 << blkbits, iomap);
1247 first_blk++;
1248 }
1249 }
1250
1251 static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
1252 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1253 struct iomap *iomap, iomap_punch_t punch)
1254 {
1255 if (!folio_test_dirty(folio))
1256 return;
1257
1258 /* if dirty, punch up to offset */
1259 if (start_byte > *punch_start_byte) {
1260 punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
1261 iomap);
1262 }
1263
1264 /* Punch non-dirty blocks within folio */
1265 iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
1266 iomap, punch);
1267
1268 /*
1269 * Make sure the next punch start is correctly bound to
1270 * the end of this data range, not the end of the folio.
1271 */
1272 *punch_start_byte = min_t(loff_t, end_byte, folio_next_pos(folio));
1273 }
1274
1275 /*
1276 * Scan the data range passed to us for dirty page cache folios. If we find a
1277 * dirty folio, punch out the preceding range and update the offset from which
1278 * the next punch will start from.
1279 *
1280 * We can punch out storage reservations under clean pages because they either
1281 * contain data that has been written back - in which case the delalloc punch
1282 * over that range is a no-op - or they were instantiated by read faults, in
1283 * which case they contain zeroes; we can remove the delalloc backing range and
1284 * any new writes to those pages will do the normal hole filling operation...
1285 *
1286 * This makes the logic simple: we only need to keep the delalloc extents
1287 * over the dirty ranges of the page cache.
1288 *
1289 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1290 * simplify range iterations.
1291 */
1292 static void iomap_write_delalloc_scan(struct inode *inode,
1293 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1294 struct iomap *iomap, iomap_punch_t punch)
1295 {
1296 while (start_byte < end_byte) {
1297 struct folio *folio;
1298
1299 /* grab locked page */
1300 folio = filemap_lock_folio(inode->i_mapping,
1301 start_byte >> PAGE_SHIFT);
1302 if (IS_ERR(folio)) {
1303 start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
1304 PAGE_SIZE;
1305 continue;
1306 }
1307
1308 iomap_write_delalloc_punch(inode, folio, punch_start_byte,
1309 start_byte, end_byte, iomap, punch);
1310
1311 /* move offset to start of next folio in range */
1312 start_byte = folio_next_pos(folio);
1313 folio_unlock(folio);
1314 folio_put(folio);
1315 }
1316 }
1317
1318 /*
1319 * When a short write occurs, the filesystem might need to use ->iomap_end
1320 * to remove space reservations created in ->iomap_begin.
1321 *
1322 * For filesystems that use delayed allocation, there can be dirty pages over
1323 * the delalloc extent outside the range of a short write but still within the
1324 * delalloc extent allocated for this iomap if the write raced with page
1325 * faults.
1326 *
1327 * Punch out all the delalloc blocks in the range given except for those that
1328 * have dirty data still pending in the page cache - those are going to be
1329 * written and so must still retain the delalloc backing for writeback.
1330 *
1331 * The punch() callback *must* only punch delalloc extents in the range passed
1332 * to it. It must skip over all other types of extents in the range and leave
1333 * them completely unchanged. It must do this punch atomically with respect to
1334 * other extent modifications.
1335 *
1336 * The punch() callback may be called with a folio locked to prevent writeback
1337 * extent allocation racing at the edge of the range we are currently punching.
1338 * The locked folio may or may not cover the range being punched, so it is not
1339 * safe for the punch() callback to lock folios itself.
1340 *
1341 * Lock order is:
1342 *
1343 * inode->i_rwsem (shared or exclusive)
1344 * inode->i_mapping->invalidate_lock (exclusive)
1345 * folio_lock()
1346 * ->punch
1347 * internal filesystem allocation lock
1348 *
1349 * As we are scanning the page cache for data, we don't need to reimplement the
1350 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
1351 * start and end of data ranges correctly even for sub-folio block sizes. This
1352 * byte range based iteration is especially convenient because it means we
1353 * don't have to care about variable size folios, nor where the start or end of
1354 * the data range lies within a folio, whether they lie within the same folio,
1355 * or even if there are multiple discontiguous data ranges within the folio.
1356 *
1357 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
1358 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
1359 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
1360 * date. A write page fault can then mark it dirty. If we then fail a write()
1361 * beyond EOF into that up to date cached range, we allocate a delalloc block
1362 * beyond EOF and then have to punch it out. Because the range is up to date,
1363 * mapping_seek_hole_data() will return it, and we will skip the punch because
1364 * the folio is dirty. This is incorrect - we always need to punch out delalloc
1365 * beyond EOF in this case as writeback will never write back and convert that
1366 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
1367 * resulting in always punching out the range from the EOF to the end of the
1368 * range the iomap spans.
1369 *
1370 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
1371 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
1372 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
1373 * returns the end of the data range (data_end). Using closed intervals would
1374 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
1375 * the code to subtle off-by-one bugs....
1376 */
1377 void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
1378 loff_t end_byte, unsigned flags, struct iomap *iomap,
1379 iomap_punch_t punch)
1380 {
1381 loff_t punch_start_byte = start_byte;
1382 loff_t scan_end_byte = min(i_size_read(inode), end_byte);
1383
1384 /*
1385 * The caller must hold invalidate_lock to avoid races with page faults
1386 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
1387 * we walk the cache and perform delalloc extent removal. Failing to do
1388 * this can leave dirty pages with no space reservation in the cache.
1389 */
1390 lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);
1391
1392 while (start_byte < scan_end_byte) {
1393 loff_t data_end;
1394
1395 start_byte = mapping_seek_hole_data(inode->i_mapping,
1396 start_byte, scan_end_byte, SEEK_DATA);
1397 /*
1398 * If there is no more data to scan, all that is left is to
1399 * punch out the remaining range.
1400 *
1401 * Note that mapping_seek_hole_data is only supposed to return
1402 * either an offset or -ENXIO, so WARN on any other error as
1403 * that would be an API change without updating the callers.
1404 */
1405 if (start_byte == -ENXIO || start_byte == scan_end_byte)
1406 break;
1407 if (WARN_ON_ONCE(start_byte < 0))
1408 return;
1409 WARN_ON_ONCE(start_byte < punch_start_byte);
1410 WARN_ON_ONCE(start_byte > scan_end_byte);
1411
1412 /*
1413 * We find the end of this contiguous cached data range by
1414 * seeking from start_byte to the beginning of the next hole.
1415 */
1416 data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
1417 scan_end_byte, SEEK_HOLE);
1418 if (WARN_ON_ONCE(data_end < 0))
1419 return;
1420
1421 /*
1422 * If we race with post-direct I/O invalidation of the page cache,
1423 * there might be no data left at start_byte.
1424 */
1425 if (data_end == start_byte)
1426 continue;
1427
1428 WARN_ON_ONCE(data_end < start_byte);
1429 WARN_ON_ONCE(data_end > scan_end_byte);
1430
1431 iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
1432 data_end, iomap, punch);
1433
1434 /* The next data search starts at the end of this one. */
1435 start_byte = data_end;
1436 }
1437
1438 if (punch_start_byte < end_byte)
1439 punch(inode, punch_start_byte, end_byte - punch_start_byte,
1440 iomap);
1441 }
1442 EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
1443
1444 static int iomap_unshare_iter(struct iomap_iter *iter,
1445 const struct iomap_write_ops *write_ops)
1446 {
1447 struct iomap *iomap = &iter->iomap;
1448 u64 bytes = iomap_length(iter);
1449 int status;
1450
1451 if (!iomap_want_unshare_iter(iter))
1452 return iomap_iter_advance(iter, bytes);
1453
1454 do {
1455 struct folio *folio;
1456 size_t offset;
1457 bool ret;
1458
1459 bytes = min_t(u64, SIZE_MAX, bytes);
1460 status = iomap_write_begin(iter, write_ops, &folio, &offset,
1461 &bytes);
1462 if (unlikely(status))
1463 return status;
1464 if (iomap->flags & IOMAP_F_STALE)
1465 break;
1466
1467 ret = iomap_write_end(iter, bytes, bytes, folio);
1468 __iomap_put_folio(iter, write_ops, bytes, folio);
1469 if (WARN_ON_ONCE(!ret))
1470 return -EIO;
1471
1472 cond_resched();
1473
1474 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1475
1476 status = iomap_iter_advance(iter, bytes);
1477 if (status)
1478 break;
1479 } while ((bytes = iomap_length(iter)) > 0);
1480
1481 return status;
1482 }
1483
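/*
 * Break the sharing of extents backing the given byte range by reading the
 * existing data into the page cache and dirtying it, so that writeback
 * allocates new (unshared) blocks for it.
 */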
1484 int
1485 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1486 const struct iomap_ops *ops,
1487 const struct iomap_write_ops *write_ops)
1488 {
1489 struct iomap_iter iter = {
1490 .inode = inode,
1491 .pos = pos,
1492 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
1493 };
1494 loff_t size = i_size_read(inode);
1495 int ret;
1496
1497 if (pos < 0 || pos >= size)
1498 return 0;
1499
1500 iter.len = min(len, size - pos);
1501 while ((ret = iomap_iter(&iter, ops)) > 0)
1502 iter.status = iomap_unshare_iter(&iter, write_ops);
1503 return ret;
1504 }
1505 EXPORT_SYMBOL_GPL(iomap_file_unshare);
1506
1507 /*
1508 * Flush the remaining range of the iter and mark the current mapping stale.
1509 * This is used when zero range sees an unwritten mapping that may have had
1510 * dirty pagecache over it.
1511 */
1512 static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
1513 {
1514 struct address_space *mapping = i->inode->i_mapping;
1515 loff_t end = i->pos + i->len - 1;
1516
1517 i->iomap.flags |= IOMAP_F_STALE;
1518 return filemap_write_and_wait_range(mapping, i->pos, end);
1519 }
1520
1521 static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
1522 const struct iomap_write_ops *write_ops)
1523 {
1524 u64 bytes = iomap_length(iter);
1525 int status;
1526
1527 do {
1528 struct folio *folio;
1529 size_t offset;
1530 bool ret;
1531
1532 bytes = min_t(u64, SIZE_MAX, bytes);
1533 status = iomap_write_begin(iter, write_ops, &folio, &offset,
1534 &bytes);
1535 if (status)
1536 return status;
1537 if (iter->iomap.flags & IOMAP_F_STALE)
1538 break;
1539
1540 /* a NULL folio means we're done with a folio batch */
1541 if (!folio) {
1542 status = iomap_iter_advance_full(iter);
1543 break;
1544 }
1545
1546 /* warn about zeroing folios beyond eof that won't write back */
1547 WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
1548
1549 trace_iomap_zero_iter(iter->inode, folio_pos(folio) + offset,
1550 bytes);
1551
1552 folio_zero_range(folio, offset, bytes);
1553 folio_mark_accessed(folio);
1554
1555 ret = iomap_write_end(iter, bytes, bytes, folio);
1556 __iomap_put_folio(iter, write_ops, bytes, folio);
1557 if (WARN_ON_ONCE(!ret))
1558 return -EIO;
1559
1560 status = iomap_iter_advance(iter, bytes);
1561 if (status)
1562 break;
1563 } while ((bytes = iomap_length(iter)) > 0);
1564
1565 if (did_zero)
1566 *did_zero = true;
1567 return status;
1568 }
1569
1570 /**
1571 * iomap_fill_dirty_folios - fill a folio batch with dirty folios
1572 * @iter: Iteration structure
1573 * @start: Start offset of range. Updated based on lookup progress.
1574 * @end: End offset of range
1575 * @iomap_flags: Flags to set on the associated iomap to track the batch.
1576 *
1577 * Returns the folio count directly. If the batch lookup is performed, also
1578 * returns the associated control flag and the expected offset of a subsequent
1579 * lookup via the out params. The caller is responsible for setting the flag on
1580 * the associated iomap.
1581 */
1582 unsigned int
1583 iomap_fill_dirty_folios(
1584 struct iomap_iter *iter,
1585 loff_t *start,
1586 loff_t end,
1587 unsigned int *iomap_flags)
1588 {
1589 struct address_space *mapping = iter->inode->i_mapping;
1590 pgoff_t pstart = *start >> PAGE_SHIFT;
1591 pgoff_t pend = (end - 1) >> PAGE_SHIFT;
1592 unsigned int count;
1593
1594 if (!iter->fbatch) {
1595 *start = end;
1596 return 0;
1597 }
1598
1599 count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
1600 *start = (pstart << PAGE_SHIFT);
1601 *iomap_flags |= IOMAP_F_FOLIO_BATCH;
1602 return count;
1603 }
1604 EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
1605
1606 int
1607 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1608 const struct iomap_ops *ops,
1609 const struct iomap_write_ops *write_ops, void *private)
1610 {
1611 struct folio_batch fbatch;
1612 struct iomap_iter iter = {
1613 .inode = inode,
1614 .pos = pos,
1615 .len = len,
1616 .flags = IOMAP_ZERO,
1617 .private = private,
1618 .fbatch = &fbatch,
1619 };
1620 struct address_space *mapping = inode->i_mapping;
1621 int ret;
1622 bool range_dirty;
1623
1624 folio_batch_init(&fbatch);
1625
1626 /*
1627 * To avoid an unconditional flush, check pagecache state and only flush
1628 * if dirty and the fs returns a mapping that might convert on
1629 * writeback.
1630 */
1631 range_dirty = filemap_range_needs_writeback(mapping, iter.pos,
1632 iter.pos + iter.len - 1);
1633 while ((ret = iomap_iter(&iter, ops)) > 0) {
1634 const struct iomap *srcmap = iomap_iter_srcmap(&iter);
1635
1636 if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
1637 srcmap->type != IOMAP_UNWRITTEN))
1638 return -EIO;
1639
1640 if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
1641 (srcmap->type == IOMAP_HOLE ||
1642 srcmap->type == IOMAP_UNWRITTEN)) {
1643 s64 status;
1644
1645 if (range_dirty) {
1646 range_dirty = false;
1647 status = iomap_zero_iter_flush_and_stale(&iter);
1648 } else {
1649 status = iomap_iter_advance_full(&iter);
1650 }
1651 iter.status = status;
1652 continue;
1653 }
1654
1655 iter.status = iomap_zero_iter(&iter, did_zero, write_ops);
1656 }
1657 return ret;
1658 }
1659 EXPORT_SYMBOL_GPL(iomap_zero_range);
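/*
 * Hedged usage sketch: a filesystem extending a file might zero the range
 * between the old and new size with a call roughly like the one below.
 * "fs_buffered_ops" is a hypothetical iomap_ops table; a NULL write_ops and
 * private pointer are assumed to be acceptable for the caller in question.
 *
 *	error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 *			&did_zero, &fs_buffered_ops, NULL, NULL);
 */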
1660
1661 int
1662 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1663 const struct iomap_ops *ops,
1664 const struct iomap_write_ops *write_ops, void *private)
1665 {
1666 unsigned int blocksize = i_blocksize(inode);
1667 unsigned int off = pos & (blocksize - 1);
1668
1669 /* Block boundary? Nothing to do */
1670 if (!off)
1671 return 0;
1672 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
1673 write_ops, private);
1674 }
1675 EXPORT_SYMBOL_GPL(iomap_truncate_page);
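/*
 * Hedged usage sketch: a truncate/setattr path would typically zero the
 * partial block at the new size before i_size is updated, e.g.:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&fs_buffered_ops, NULL, NULL);
 *
 * where "fs_buffered_ops" is again a hypothetical iomap_ops table.
 */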
1676
1677 static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1678 struct folio *folio)
1679 {
1680 loff_t length = iomap_length(iter);
1681 int ret;
1682
1683 if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1684 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1685 &iter->iomap);
1686 if (ret)
1687 return ret;
1688 block_commit_write(folio, 0, length);
1689 } else {
1690 WARN_ON_ONCE(!folio_test_uptodate(folio));
1691 folio_mark_dirty(folio);
1692 }
1693
1694 return iomap_iter_advance(iter, length);
1695 }
1696
1697 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
1698 void *private)
1699 {
1700 struct iomap_iter iter = {
1701 .inode = file_inode(vmf->vma->vm_file),
1702 .flags = IOMAP_WRITE | IOMAP_FAULT,
1703 .private = private,
1704 };
1705 struct folio *folio = page_folio(vmf->page);
1706 ssize_t ret;
1707
1708 folio_lock(folio);
1709 ret = folio_mkwrite_check_truncate(folio, iter.inode);
1710 if (ret < 0)
1711 goto out_unlock;
1712 iter.pos = folio_pos(folio);
1713 iter.len = ret;
1714 while ((ret = iomap_iter(&iter, ops)) > 0)
1715 iter.status = iomap_folio_mkwrite_iter(&iter, folio);
1716
1717 if (ret < 0)
1718 goto out_unlock;
1719 folio_wait_stable(folio);
1720 return VM_FAULT_LOCKED;
1721 out_unlock:
1722 folio_unlock(folio);
1723 return vmf_fs_error(ret);
1724 }
1725 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
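/*
 * Hedged usage sketch: filesystems normally call this from their
 * vm_operations_struct ->page_mkwrite handler, usually bracketed by
 * sb_start_pagefault()/sb_end_pagefault() and any fs-internal locking.
 * "fs_page_mkwrite" and "fs_buffered_ops" are hypothetical names.
 *
 *	static vm_fault_t fs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		ret = iomap_page_mkwrite(vmf, &fs_buffered_ops, NULL);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */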
1726
1727 static void iomap_writeback_init(struct inode *inode, struct folio *folio)
1728 {
1729 struct iomap_folio_state *ifs = folio->private;
1730
1731 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1732 if (ifs) {
1733 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
1734 /*
1735 * Set this to the folio size. After processing the folio for
1736 * writeback in iomap_writeback_folio(), we'll subtract any
1737 * ranges not written back.
1738 *
1739 * We do this because otherwise, we would have to atomically
1740 * increment ifs->write_bytes_pending every time a range in the
1741 * folio needs to be written back.
1742 */
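/*
 * Illustrative arithmetic (assumed numbers): for a 64k folio with only 16k
 * of dirty data, the atomic_set() below starts the counter at 65536;
 * iomap_writeback_folio() later subtracts the 49152 bytes that were never
 * submitted, and the remaining 16384 drains as the submitted I/O completes,
 * at which point folio_end_writeback() runs.
 */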
1743 atomic_set(&ifs->write_bytes_pending, folio_size(folio));
1744 }
1745 }
1746
1747 void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1748 size_t len)
1749 {
1750 struct iomap_folio_state *ifs = folio->private;
1751
1752 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1753 WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1754
1755 if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1756 folio_end_writeback(folio);
1757 }
1758 EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
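/*
 * Hedged usage sketch: a bio-backed writeback completion handler would walk
 * the folios attached to the bio and finish each one, roughly:
 *
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio)
 *		iomap_finish_folio_write(inode, fi.folio, fi.length);
 *
 * Error propagation and the inode lookup are omitted here.
 */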
1759
1760 static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
1761 struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
1762 size_t *bytes_submitted)
1763 {
1764 do {
1765 ssize_t ret;
1766
1767 ret = wpc->ops->writeback_range(wpc, folio, pos, rlen, end_pos);
1768 if (WARN_ON_ONCE(ret == 0 || ret > rlen))
1769 return -EIO;
1770 if (ret < 0)
1771 return ret;
1772 rlen -= ret;
1773 pos += ret;
1774
1775 /*
1776 * Holes are not written back by ->writeback_range, so track
1777 * whether we handled anything here that is not a hole.
1778 */
1779 if (wpc->iomap.type != IOMAP_HOLE)
1780 *bytes_submitted += ret;
1781 } while (rlen);
1782
1783 return 0;
1784 }
1785
1786 /*
1787 * Check interaction of the folio with the file end.
1788 *
1789 * If the folio is entirely beyond i_size, return false. If it straddles
1790 * i_size, adjust end_pos and zero all data beyond i_size.
1791 */
1792 static bool iomap_writeback_handle_eof(struct folio *folio, struct inode *inode,
1793 u64 *end_pos)
1794 {
1795 u64 isize = i_size_read(inode);
1796
1797 if (*end_pos > isize) {
1798 size_t poff = offset_in_folio(folio, isize);
1799 pgoff_t end_index = isize >> PAGE_SHIFT;
1800
1801 /*
1802 * If the folio is entirely outside of i_size, skip it.
1803 *
1804 * This can happen due to a truncate operation that is in
1805 * progress and in that case truncate will finish it off once
1806 * we've dropped the folio lock.
1807 *
1808 * Note that the pgoff_t used for end_index is an unsigned long.
1809 * If the given offset is greater than 16TB on a 32-bit system,
1810 * then if we checked if the folio is fully outside i_size with
1811 * "if (folio->index >= end_index + 1)", "end_index + 1" would
1812 * overflow and evaluate to 0. Hence this folio would be
1813 * redirtied and written out repeatedly, which would result in
1814 * an infinite loop; the user program performing this operation
1815 * would hang. Instead, we can detect this situation by
1816 * checking if the folio is totally beyond i_size or if its
1817 * offset is just equal to the EOF.
1818 */
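/*
 * Worked example (assumed numbers): with 4k pages on a 32-bit system, an
 * i_size of 16TB - 1 gives end_index == 0xffffffff, so the hypothetical
 * "end_index + 1" would wrap to 0 and every folio index would compare as
 * being past EOF.
 */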
1819 if (folio->index > end_index ||
1820 (folio->index == end_index && poff == 0))
1821 return false;
1822
1823 /*
1824 * The folio straddles i_size.
1825 *
1826 * It must be zeroed out on each and every writepage invocation
1827 * because it may be mmapped:
1828 *
1829 * A file is mapped in multiples of the page size. For a
1830 * file that is not a multiple of the page size, the
1831 * remaining memory is zeroed when mapped, and writes to that
1832 * region are not written out to the file.
1833 *
1834 * Also adjust the end_pos to the end of file and skip writeback
1835 * for all blocks entirely beyond i_size.
1836 */
1837 folio_zero_segment(folio, poff, folio_size(folio));
1838 *end_pos = isize;
1839 }
1840
1841 return true;
1842 }
1843
1844 int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio)
1845 {
1846 struct iomap_folio_state *ifs = folio->private;
1847 struct inode *inode = wpc->inode;
1848 u64 pos = folio_pos(folio);
1849 u64 end_pos = pos + folio_size(folio);
1850 u64 end_aligned = 0;
1851 loff_t orig_pos = pos;
1852 size_t bytes_submitted = 0;
1853 int error = 0;
1854 u32 rlen;
1855
1856 WARN_ON_ONCE(!folio_test_locked(folio));
1857 WARN_ON_ONCE(folio_test_dirty(folio));
1858 WARN_ON_ONCE(folio_test_writeback(folio));
1859
1860 trace_iomap_writeback_folio(inode, pos, folio_size(folio));
1861
1862 if (!iomap_writeback_handle_eof(folio, inode, &end_pos))
1863 return 0;
1864 WARN_ON_ONCE(end_pos <= pos);
1865
1866 if (i_blocks_per_folio(inode, folio) > 1) {
1867 if (!ifs) {
1868 ifs = ifs_alloc(inode, folio, 0);
1869 iomap_set_range_dirty(folio, 0, end_pos - pos);
1870 }
1871
1872 iomap_writeback_init(inode, folio);
1873 }
1874
1875 /*
1876 * Set the writeback bit ASAP, as the I/O completion for the single
1877 * block per folio case can happen as soon as we submit the bio.
1878 */
1879 folio_start_writeback(folio);
1880
1881 /*
1882 * Walk through the folio to find dirty areas to write back.
1883 */
1884 end_aligned = round_up(end_pos, i_blocksize(inode));
1885 while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
1886 error = iomap_writeback_range(wpc, folio, pos, rlen, end_pos,
1887 &bytes_submitted);
1888 if (error)
1889 break;
1890 pos += rlen;
1891 }
1892
1893 if (bytes_submitted)
1894 wpc->nr_folios++;
1895 if (error && pos > orig_pos)
1896 fserror_report_io(inode, FSERR_BUFFERED_WRITE, orig_pos, 0,
1897 error, GFP_NOFS);
1898
1899 /*
1900 * We can have dirty bits set past the end of file in the page_mkwrite path
1901 * while mapping the last partial folio. Hence it's better to clear
1902 * all the dirty bits in the folio here.
1903 */
1904 iomap_clear_range_dirty(folio, 0, folio_size(folio));
1905
1906 /*
1907 * Usually the writeback bit is cleared by the I/O completion handler.
1908 * But we may end up not writing any blocks at all, or (when there are
1909 * multiple blocks in a folio) all I/O may already have finished at this
1910 * point. In that case we need to clear the writeback bit ourselves here,
1911 * before the caller unlocks the folio.
1912 */
1913 if (ifs) {
1914 /*
1915 * Subtract any bytes that were initially accounted to
1916 * write_bytes_pending but skipped for writeback.
1917 */
1918 size_t bytes_not_submitted = folio_size(folio) -
1919 bytes_submitted;
1920
1921 if (bytes_not_submitted)
1922 iomap_finish_folio_write(inode, folio,
1923 bytes_not_submitted);
1924 } else if (!bytes_submitted) {
1925 folio_end_writeback(folio);
1926 }
1927
1928 mapping_set_error(inode->i_mapping, error);
1929 return error;
1930 }
1931 EXPORT_SYMBOL_GPL(iomap_writeback_folio);
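/*
 * Hedged usage sketch: iomap_writepages() below drives this through
 * writeback_iter(), but a caller writing back a single folio it already
 * holds could follow the same pattern, under the assumptions the
 * WARN_ON_ONCE() checks above encode (folio locked, dirty bit already
 * transferred to writeback):
 *
 *	folio_lock(folio);
 *	if (folio_clear_dirty_for_io(folio))
 *		error = iomap_writeback_folio(wpc, folio);
 *	folio_unlock(folio);
 *	error = wpc->ops->writeback_submit(wpc, error);
 */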
1932
1933 int
1934 iomap_writepages(struct iomap_writepage_ctx *wpc)
1935 {
1936 struct address_space *mapping = wpc->inode->i_mapping;
1937 struct folio *folio = NULL;
1938 int error;
1939
1940 /*
1941 * Writeback from reclaim context should never happen except in the case
1942 * of a VM regression, so warn about it and refuse to write the data.
1943 */
1944 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
1945 PF_MEMALLOC))
1946 return -EIO;
1947
1948 while ((folio = writeback_iter(mapping, wpc->wbc, folio, &error))) {
1949 error = iomap_writeback_folio(wpc, folio);
1950 folio_unlock(folio);
1951 }
1952
1953 /*
1954 * If @error is non-zero, it means that we have a situation where some
1955 * part of the submission process has failed after we've marked pages
1956 * for writeback.
1957 *
1958 * We cannot cancel the writeback directly in that case, so always call
1959 * ->writeback_submit to run the I/O completion handler to clear the
1960 * writeback bit and let the file system process the errors.
1961 */
1962 if (wpc->wb_ctx)
1963 return wpc->ops->writeback_submit(wpc, error);
1964 return error;
1965 }
1966 EXPORT_SYMBOL_GPL(iomap_writepages);
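/*
 * Hedged usage sketch: a filesystem's ->writepages method would stack a
 * writeback context on top of this, roughly:
 *
 *	static int fs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = {
 *			.inode	= mapping->host,
 *			.wbc	= wbc,
 *			.ops	= &fs_writeback_ops,
 *		};
 *
 *		return iomap_writepages(&wpc);
 *	}
 *
 * "fs_writeback_ops" is a hypothetical ops table providing the
 * ->writeback_range and ->writeback_submit callbacks used above.
 */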
1967