// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_read_end_io(struct bio *bio)
{
	struct folio_iter fi;
	int err = blk_status_to_errno(bio->bi_status);

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, err == 0);

	bio_put(bio);
}

static void mpage_write_end_io(struct bio *bio)
{
	struct folio_iter fi;
	int err = blk_status_to_errno(bio->bi_status);

	bio_for_each_folio_all(fi, bio) {
		if (err)
			mapping_set_error(fi.folio->mapping, err);
		folio_end_writeback(fi.folio);
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit_read(struct bio *bio)
{
	bio->bi_end_io = mpage_read_end_io;
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *mpage_bio_submit_write(struct bio *bio)
{
	bio->bi_end_io = mpage_write_end_io;
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

/*
 * Support function for mpage_readahead.  The fs-supplied get_block might
 * return an uptodate buffer.  This is used to map that buffer into
 * the folio, which allows read_folio to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to folios that don't already have
 * them.  So when the buffer is uptodate and the folio size equals the block
 * size, this marks the folio uptodate instead of adding new buffers.
 */
static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
		int page_block)
{
	struct inode *inode = folio->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	head = folio_buffers(folio);
	if (!head) {
		/*
		 * Don't create any buffers when the folio would hold just a
		 * single buffer: the folio only needs to be marked uptodate.
		 */
		if (inode->i_blkbits == folio_shift(folio) &&
		    buffer_uptodate(bh)) {
			folio_mark_uptodate(folio);
			return;
		}
		head = create_empty_buffers(folio, i_blocksize(inode), 0);
	}

	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

struct mpage_readpage_args {
	struct bio *bio;
	struct folio *folio;
	unsigned int nr_pages;
	bool is_readahead;
	sector_t last_block_in_bio;
	struct buffer_head map_bh;
	unsigned long first_logical_block;
	get_block_t *get_block;
};

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs: it submits the
 * accumulated BIO whenever the blocks stop being contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static void do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct folio *folio = args->folio;
	struct inode *inode = folio->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t first_block;
	unsigned page_block;
	unsigned first_hole = blocks_per_folio;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	blk_opf_t opf = REQ_OP_READ;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);

	if (args->is_readahead) {
		opf |= REQ_RAHEAD;
		gfp |= __GFP_NORETRY | __GFP_NOWARN;
	}

	if (folio_buffers(folio))
		goto confused;

	block_in_file = folio_pos(folio) >> blkbits;
	last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
			block_in_file > args->first_logical_block &&
			block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		first_block = map_bh->b_blocknr + map_offset;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_folio)
				break;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this folio.
	 */
	map_bh->b_folio = folio;
	while (page_block < blocks_per_folio) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_folio)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/*
		 * Some filesystems will copy data into the folio during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_folio copies the data
		 * we just collected from get_block into the folio's buffers
		 * so read_folio doesn't have to repeat the get_block call.
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_folio(folio, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_folio)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (!page_block)
			first_block = map_bh->b_blocknr;
		else if (first_block + page_block != map_bh->b_blocknr)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_folio)
				break;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_folio) {
		folio_zero_segment(folio, first_hole << blkbits, folio_size(folio));
		if (first_hole == 0) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			goto out;
		}
	} else if (fully_mapped) {
		folio_set_mappedtodisk(folio);
	}

	/*
	 * This folio will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != first_block - 1))
		args->bio = mpage_bio_submit_read(args->bio);

alloc_new:
	if (args->bio == NULL) {
		args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
				      gfp);
		if (args->bio == NULL)
			goto confused;
		args->bio->bi_iter.bi_sector = first_block << (blkbits - 9);
	}

	length = first_hole << blkbits;
	if (!bio_add_folio(args->bio, folio, length, 0)) {
		args->bio = mpage_bio_submit_read(args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_folio))
		args->bio = mpage_bio_submit_read(args->bio);
	else
		args->last_block_in_bio = first_block + blocks_per_folio - 1;
out:
	return;

confused:
	if (args->bio)
		args->bio = mpage_bio_submit_read(args->bio);
	if (!folio_test_uptodate(folio))
		block_read_full_folio(folio, args->get_block);
	else
		folio_unlock(folio);
	goto out;
}

/**
 * mpage_readahead - start reads against pages
 * @rac: Describes which pages to read.
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file in blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push whatever I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
{
	struct folio *folio;
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};

	while ((folio = readahead_folio(rac))) {
		prefetchw(&folio->flags);
		args.folio = folio;
		args.nr_pages = readahead_count(rac);
		do_mpage_readpage(&args);
		/*
		 * If readahead failed synchronously, it may have been caused
		 * by a removed device or a filesystem metadata error.
		 */
		if (!folio_test_locked(folio) && !folio_test_uptodate(folio))
			break;
	}
	if (args.bio)
		mpage_bio_submit_read(args.bio);
}
EXPORT_SYMBOL(mpage_readahead);
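
/*
 * Illustrative sketch (not part of this file): a filesystem's get_block
 * cooperates with the BH_Boundary scheme described above roughly like
 * this.  All example_* names are hypothetical:
 *
 *	static int example_get_block(struct inode *inode, sector_t iblock,
 *			struct buffer_head *bh, int create)
 *	{
 *		sector_t phys = example_lookup(inode, iblock);
 *
 *		if (!phys)
 *			return 0;	// bh left unmapped: a hole
 *		map_bh(bh, inode->i_sb, phys);
 *		// Does the next block's mapping need a metadata read
 *		// (e.g. an indirect block)?  Tell mpage to push the
 *		// accumulated I/O first.
 *		if (example_next_needs_metadata_io(inode, iblock))
 *			set_buffer_boundary(bh);
 *		return 0;
 *	}
 */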

/*
 * This isn't called much at all
 */
int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.folio = folio,
		.nr_pages = folio_nr_pages(folio),
		.get_block = get_block,
	};

	do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit_read(args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_read_folio);
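
/*
 * Sketch of typical wiring (hypothetical filesystem, not part of this
 * file): the read side plugs these helpers into the fs's
 * address_space_operations, with example_get_block being the fs's mapper:
 *
 *	static int example_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return mpage_read_folio(folio, example_get_block);
 *	}
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		mpage_readahead(rac, example_get_block);
 *	}
 *
 *	static const struct address_space_operations example_aops = {
 *		.read_folio	= example_read_folio,
 *		.readahead	= example_readahead,
 *	};
 */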

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to block_write_full_folio().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (BIO_MAX_VECS) BIOs.
 */

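/*
 * In buffer_head terms (a restatement of the above, not an extra
 * requirement): the fast path below accepts a folio roughly when every
 * buffer bh at index n satisfies
 *
 *	buffer_mapped(bh) && buffer_dirty(bh) && buffer_uptodate(bh)
 *	&& bh->b_blocknr == first_block + n
 *
 * i.e. the blocks are physically contiguous, with unmapped blocks
 * tolerated only in a trailing run (end-of-file); anything else takes
 * the "confused" fallback path.
 */
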
struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct folio *folio, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head = folio_buffers(folio);

	if (!head)
		return;
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We cannot drop the bh if the folio is not uptodate, or else a
	 * concurrent read_folio would fail to serialize with the bh and
	 * would read from disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && folio_test_uptodate(folio))
		try_to_free_buffers(folio);
}

static int mpage_write_folio(struct writeback_control *wbc, struct folio *folio,
		struct mpage_data *mpd)
{
	struct bio *bio = mpd->bio;
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t first_block;
	unsigned page_block;
	unsigned first_unmapped = blocks_per_folio;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	size_t length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	struct buffer_head *head = folio_buffers(folio);

	if (head) {
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * block_dirty_folio -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_folio)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_folio)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != first_block + page_block)
					goto confused;
			} else {
				first_block = bh->b_blocknr;
			}
			page_block++;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_folio().  If this address_space is also
		 * using mpage_readahead then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!folio_test_uptodate(folio));
	block_in_file = folio_pos(folio) >> blkbits;
	/*
	 * Whole page beyond EOF? Skip allocating blocks to avoid leaking
	 * space.
	 */
	if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
		goto page_is_mapped;
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_folio = folio;
	for (page_block = 0; page_block < blocks_per_folio; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (!buffer_mapped(&map_bh))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != first_block + page_block)
				goto confused;
		} else {
			first_block = map_bh.b_blocknr;
		}
		page_block++;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	/* Don't bother writing beyond EOF, truncate will discard the folio */
	if (folio_pos(folio) >= i_size)
		goto confused;
	length = folio_size(folio);
	if (folio_pos(folio) + length > i_size) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		length = i_size - folio_pos(folio);
		folio_zero_segment(folio, length, folio_size(folio));
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != first_block - 1)
		bio = mpage_bio_submit_write(bio);

alloc_new:
	if (bio == NULL) {
		bio = bio_alloc(bdev, BIO_MAX_VECS,
				REQ_OP_WRITE | wbc_to_write_flags(wbc),
				GFP_NOFS);
		bio->bi_iter.bi_sector = first_block << (blkbits - 9);
		wbc_init_bio(wbc, bio);
		bio->bi_write_hint = inode->i_write_hint;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_cgroup_owner(wbc, folio, folio_size(folio));
	length = first_unmapped << blkbits;
	if (!bio_add_folio(bio, folio, length, 0)) {
		bio = mpage_bio_submit_write(bio);
		goto alloc_new;
	}

	clean_buffers(folio, first_unmapped);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (boundary || (first_unmapped != blocks_per_folio)) {
		bio = mpage_bio_submit_write(bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = first_block + blocks_per_folio - 1;
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit_write(bio);

	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	ret = block_write_full_folio(folio, wbc, mpd->get_block);
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * __mpage_writepages - write back the dirty folios of the given address space
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 * @write_folio: handler to call for each folio before calling
 *		 mpage_write_folio()
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.  It calls the @write_folio handler for each folio.
 * If the handler returns a value > 0, it calls mpage_write_folio() to do the
 * folio writeback.
 */
int
__mpage_writepages(struct address_space *mapping,
		   struct writeback_control *wbc, get_block_t get_block,
		   int (*write_folio)(struct folio *folio,
				      struct writeback_control *wbc))
{
	struct mpage_data mpd = {
		.get_block	= get_block,
	};
	struct folio *folio = NULL;
	struct blk_plug plug;
	int error;

	blk_start_plug(&plug);
	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		if (write_folio) {
			error = write_folio(folio, wbc);
			/*
			 * == 0 means folio is handled, < 0 means error. In
			 * both cases hand back control to writeback_iter()
			 */
			if (error <= 0)
				continue;
			/* Let mpage_write_folio() handle the folio. */
		}
		error = mpage_write_folio(wbc, folio, &mpd);
	}
	if (mpd.bio)
		mpage_bio_submit_write(mpd.bio);
	blk_finish_plug(&plug);
	return error;
}
EXPORT_SYMBOL(__mpage_writepages);
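
/*
 * Sketch of typical wiring (hypothetical filesystem, not part of this
 * file), against this file's __mpage_writepages() signature: a fs that
 * needs no per-folio preprocessing passes NULL for @write_folio.
 * example_* names are hypothetical:
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return __mpage_writepages(mapping, wbc, example_get_block,
 *					  NULL);
 *	}
 *
 *	static const struct address_space_operations example_aops = {
 *		.writepages	= example_writepages,
 *	};
 */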
694