xref: /linux/fs/buffer.c (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/buffer.c
4  *
5  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
6  */
7 
8 /*
9  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10  *
11  * Removed a lot of unnecessary code and simplified things now that
12  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13  *
14  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
15  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
16  *
17  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18  *
19  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
20  */
21 
22 #include <linux/kernel.h>
23 #include <linux/sched/signal.h>
24 #include <linux/syscalls.h>
25 #include <linux/fs.h>
26 #include <linux/iomap.h>
27 #include <linux/mm.h>
28 #include <linux/percpu.h>
29 #include <linux/slab.h>
30 #include <linux/capability.h>
31 #include <linux/blkdev.h>
32 #include <linux/blk-crypto.h>
33 #include <linux/file.h>
34 #include <linux/quotaops.h>
35 #include <linux/highmem.h>
36 #include <linux/export.h>
37 #include <linux/backing-dev.h>
38 #include <linux/writeback.h>
39 #include <linux/hash.h>
40 #include <linux/suspend.h>
41 #include <linux/buffer_head.h>
42 #include <linux/task_io_accounting_ops.h>
43 #include <linux/bio.h>
44 #include <linux/cpu.h>
45 #include <linux/bitops.h>
46 #include <linux/mpage.h>
47 #include <linux/bit_spinlock.h>
48 #include <linux/pagevec.h>
49 #include <linux/sched/mm.h>
50 #include <trace/events/block.h>
51 #include <linux/fscrypt.h>
52 #include <linux/fsverity.h>
53 #include <linux/sched/isolation.h>
54 
55 #include "internal.h"
56 
57 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
58 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
59 			  enum rw_hint hint, struct writeback_control *wbc);
60 
61 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
62 
63 inline void touch_buffer(struct buffer_head *bh)
64 {
65 	trace_block_touch_buffer(bh);
66 	folio_mark_accessed(bh->b_folio);
67 }
68 EXPORT_SYMBOL(touch_buffer);
69 
70 void __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
73 }
74 EXPORT_SYMBOL(__lock_buffer);
75 
76 void unlock_buffer(struct buffer_head *bh)
77 {
78 	clear_bit_unlock(BH_Lock, &bh->b_state);
79 	smp_mb__after_atomic();
80 	wake_up_bit(&bh->b_state, BH_Lock);
81 }
82 EXPORT_SYMBOL(unlock_buffer);
83 
84 /*
85  * Returns whether the folio has dirty or writeback buffers. If all the buffers
86  * are unlocked and clean then the folio_test_dirty information is stale. If
87  * any of the buffers are locked, it is assumed they are locked for IO.
88  */
89 void buffer_check_dirty_writeback(struct folio *folio,
90 				     bool *dirty, bool *writeback)
91 {
92 	struct buffer_head *head, *bh;
93 	*dirty = false;
94 	*writeback = false;
95 
96 	BUG_ON(!folio_test_locked(folio));
97 
98 	head = folio_buffers(folio);
99 	if (!head)
100 		return;
101 
102 	if (folio_test_writeback(folio))
103 		*writeback = true;
104 
105 	bh = head;
106 	do {
107 		if (buffer_locked(bh))
108 			*writeback = true;
109 
110 		if (buffer_dirty(bh))
111 			*dirty = true;
112 
113 		bh = bh->b_this_page;
114 	} while (bh != head);
115 }
116 
117 /*
118  * Block until a buffer comes unlocked.  This doesn't stop it
119  * from becoming locked again - you have to lock it yourself
120  * if you want to preserve its state.
121  */
122 void __wait_on_buffer(struct buffer_head * bh)
123 {
124 	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
125 }
126 EXPORT_SYMBOL(__wait_on_buffer);
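
/*
 * Illustrative sketch (not part of the kernel source): the usual pattern
 * after I/O has been submitted on a buffer is to wait for it and then check
 * the result:
 *
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		err = -EIO;
 *
 * As the comment above says, waiting alone does not keep the buffer
 * unlocked; if its state must not change while it is examined, take the
 * buffer lock instead:
 *
 *	lock_buffer(bh);
 *	... inspect or modify the buffer ...
 *	unlock_buffer(bh);
 */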
127 
128 static void buffer_io_error(struct buffer_head *bh, char *msg)
129 {
130 	if (!test_bit(BH_Quiet, &bh->b_state))
131 		printk_ratelimited(KERN_ERR
132 			"Buffer I/O error on dev %pg, logical block %llu%s\n",
133 			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
134 }
135 
136 /*
137  * End-of-IO handler helper function which does not touch the bh after
138  * unlocking it.
139  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
140  * a race there is benign: unlock_buffer() only uses the bh's address for
141  * hashing after unlocking the buffer, so it doesn't actually touch the bh
142  * itself.
143  */
144 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
145 {
146 	if (uptodate) {
147 		set_buffer_uptodate(bh);
148 	} else {
149 		/* This happens, due to failed read-ahead attempts. */
150 		clear_buffer_uptodate(bh);
151 	}
152 	unlock_buffer(bh);
153 }
154 
155 /*
156  * Default synchronous end-of-IO handler.  Just mark it up-to-date and
157  * unlock the buffer.
158  */
159 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
160 {
161 	put_bh(bh);
162 	__end_buffer_read_notouch(bh, uptodate);
163 }
164 EXPORT_SYMBOL(end_buffer_read_sync);
165 
166 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
167 {
168 	if (uptodate) {
169 		set_buffer_uptodate(bh);
170 	} else {
171 		buffer_io_error(bh, ", lost sync page write");
172 		mark_buffer_write_io_error(bh);
173 		clear_buffer_uptodate(bh);
174 	}
175 	unlock_buffer(bh);
176 	put_bh(bh);
177 }
178 EXPORT_SYMBOL(end_buffer_write_sync);
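
/*
 * Illustrative sketch (not part of the kernel source): these handlers pair
 * with submit_bh() for simple synchronous I/O.  Writing out one dirty
 * buffer and waiting for the result looks roughly like:
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	} else {
 *		unlock_buffer(bh);
 *	}
 *
 * end_buffer_write_sync() unlocks the buffer and drops the reference taken
 * with get_bh(), so the I/O path needs no explicit unlock.
 */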
179 
180 static struct buffer_head *
181 __find_get_block_slow(struct block_device *bdev, sector_t block, bool atomic)
182 {
183 	struct address_space *bd_mapping = bdev->bd_mapping;
184 	const int blkbits = bd_mapping->host->i_blkbits;
185 	struct buffer_head *ret = NULL;
186 	pgoff_t index;
187 	struct buffer_head *bh;
188 	struct buffer_head *head;
189 	struct folio *folio;
190 	int all_mapped = 1;
191 	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
192 
193 	index = ((loff_t)block << blkbits) / PAGE_SIZE;
194 	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
195 	if (IS_ERR(folio))
196 		goto out;
197 
198 	/*
199 	 * Folio lock protects the buffers. Callers that cannot block
200 	 * will fall back to serializing vs try_to_free_buffers() via
201 	 * the i_private_lock.
202 	 */
203 	if (atomic)
204 		spin_lock(&bd_mapping->i_private_lock);
205 	else
206 		folio_lock(folio);
207 
208 	head = folio_buffers(folio);
209 	if (!head)
210 		goto out_unlock;
211 	/*
212 	 * Upon a noref migration, the folio lock serializes here;
213 	 * otherwise bail.
214 	 */
215 	if (test_bit_acquire(BH_Migrate, &head->b_state)) {
216 		WARN_ON(!atomic);
217 		goto out_unlock;
218 	}
219 
220 	bh = head;
221 	do {
222 		if (!buffer_mapped(bh))
223 			all_mapped = 0;
224 		else if (bh->b_blocknr == block) {
225 			ret = bh;
226 			get_bh(bh);
227 			goto out_unlock;
228 		}
229 		bh = bh->b_this_page;
230 	} while (bh != head);
231 
232 	/* We might be here because some of the buffers on this page are
233 	 * not mapped.  This is due to various races between
234 	 * file I/O on the block device and getblk.  It gets dealt with
235 	 * elsewhere; don't report an error if we had some unmapped buffers.
236 	 */
237 	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
238 	if (all_mapped && __ratelimit(&last_warned)) {
239 		printk("__find_get_block_slow() failed. block=%llu, "
240 		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
241 		       "device %pg blocksize: %d\n",
242 		       (unsigned long long)block,
243 		       (unsigned long long)bh->b_blocknr,
244 		       bh->b_state, bh->b_size, bdev,
245 		       1 << blkbits);
246 	}
247 out_unlock:
248 	if (atomic)
249 		spin_unlock(&bd_mapping->i_private_lock);
250 	else
251 		folio_unlock(folio);
252 	folio_put(folio);
253 out:
254 	return ret;
255 }
256 
257 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
258 {
259 	unsigned long flags;
260 	struct buffer_head *first;
261 	struct buffer_head *tmp;
262 	struct folio *folio;
263 	int folio_uptodate = 1;
264 
265 	BUG_ON(!buffer_async_read(bh));
266 
267 	folio = bh->b_folio;
268 	if (uptodate) {
269 		set_buffer_uptodate(bh);
270 	} else {
271 		clear_buffer_uptodate(bh);
272 		buffer_io_error(bh, ", async page read");
273 	}
274 
275 	/*
276 	 * Be _very_ careful from here on. Bad things can happen if
277 	 * two buffer heads end IO at almost the same time and both
278 	 * decide that the page is now completely done.
279 	 */
280 	first = folio_buffers(folio);
281 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
282 	clear_buffer_async_read(bh);
283 	unlock_buffer(bh);
284 	tmp = bh;
285 	do {
286 		if (!buffer_uptodate(tmp))
287 			folio_uptodate = 0;
288 		if (buffer_async_read(tmp)) {
289 			BUG_ON(!buffer_locked(tmp));
290 			goto still_busy;
291 		}
292 		tmp = tmp->b_this_page;
293 	} while (tmp != bh);
294 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
295 
296 	folio_end_read(folio, folio_uptodate);
297 	return;
298 
299 still_busy:
300 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
301 }
302 
303 struct postprocess_bh_ctx {
304 	struct work_struct work;
305 	struct buffer_head *bh;
306 };
307 
308 static void verify_bh(struct work_struct *work)
309 {
310 	struct postprocess_bh_ctx *ctx =
311 		container_of(work, struct postprocess_bh_ctx, work);
312 	struct buffer_head *bh = ctx->bh;
313 	bool valid;
314 
315 	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
316 	end_buffer_async_read(bh, valid);
317 	kfree(ctx);
318 }
319 
320 static bool need_fsverity(struct buffer_head *bh)
321 {
322 	struct folio *folio = bh->b_folio;
323 	struct inode *inode = folio->mapping->host;
324 
325 	return fsverity_active(inode) &&
326 		/* needed by ext4 */
327 		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
328 }
329 
330 static void decrypt_bh(struct work_struct *work)
331 {
332 	struct postprocess_bh_ctx *ctx =
333 		container_of(work, struct postprocess_bh_ctx, work);
334 	struct buffer_head *bh = ctx->bh;
335 	int err;
336 
337 	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
338 					       bh_offset(bh));
339 	if (err == 0 && need_fsverity(bh)) {
340 		/*
341 		 * We use different work queues for decryption and for verity
342 		 * because verity may require reading metadata pages that need
343 		 * decryption, and we shouldn't recurse to the same workqueue.
344 		 */
345 		INIT_WORK(&ctx->work, verify_bh);
346 		fsverity_enqueue_verify_work(&ctx->work);
347 		return;
348 	}
349 	end_buffer_async_read(bh, err == 0);
350 	kfree(ctx);
351 }
352 
353 /*
354  * I/O completion handler for block_read_full_folio() - pages
355  * which come unlocked at the end of I/O.
356  */
357 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
358 {
359 	struct inode *inode = bh->b_folio->mapping->host;
360 	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
361 	bool verify = need_fsverity(bh);
362 
363 	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
364 	if (uptodate && (decrypt || verify)) {
365 		struct postprocess_bh_ctx *ctx =
366 			kmalloc(sizeof(*ctx), GFP_ATOMIC);
367 
368 		if (ctx) {
369 			ctx->bh = bh;
370 			if (decrypt) {
371 				INIT_WORK(&ctx->work, decrypt_bh);
372 				fscrypt_enqueue_decrypt_work(&ctx->work);
373 			} else {
374 				INIT_WORK(&ctx->work, verify_bh);
375 				fsverity_enqueue_verify_work(&ctx->work);
376 			}
377 			return;
378 		}
379 		uptodate = 0;
380 	}
381 	end_buffer_async_read(bh, uptodate);
382 }
383 
384 /*
385  * Completion handler for block_write_full_folio() - folios which are unlocked
386  * during I/O, and which have the writeback flag cleared upon I/O completion.
387  */
388 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
389 {
390 	unsigned long flags;
391 	struct buffer_head *first;
392 	struct buffer_head *tmp;
393 	struct folio *folio;
394 
395 	BUG_ON(!buffer_async_write(bh));
396 
397 	folio = bh->b_folio;
398 	if (uptodate) {
399 		set_buffer_uptodate(bh);
400 	} else {
401 		buffer_io_error(bh, ", lost async page write");
402 		mark_buffer_write_io_error(bh);
403 		clear_buffer_uptodate(bh);
404 	}
405 
406 	first = folio_buffers(folio);
407 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
408 
409 	clear_buffer_async_write(bh);
410 	unlock_buffer(bh);
411 	tmp = bh->b_this_page;
412 	while (tmp != bh) {
413 		if (buffer_async_write(tmp)) {
414 			BUG_ON(!buffer_locked(tmp));
415 			goto still_busy;
416 		}
417 		tmp = tmp->b_this_page;
418 	}
419 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
420 	folio_end_writeback(folio);
421 	return;
422 
423 still_busy:
424 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
425 }
426 
427 /*
428  * If a page's buffers are under async read (end_buffer_async_read
429  * completion) then there is a possibility that another thread of
430  * control could lock one of the buffers after it has completed
431  * but while some of the other buffers have not completed.  This
432  * locked buffer would confuse end_buffer_async_read() into not unlocking
433  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
434  * that this buffer is not under async I/O.
435  *
436  * The page comes unlocked when it has no locked buffer_async buffers
437  * left.
438  *
439  * PageLocked prevents anyone from starting new async I/O reads against
440  * any of the buffers.
441  *
442  * PageWriteback is used to prevent simultaneous writeout of the same
443  * page.
444  *
445  * PageLocked prevents anyone from starting writeback of a page which is
446  * under read I/O (PageWriteback is only ever set against a locked page).
447  */
448 static void mark_buffer_async_read(struct buffer_head *bh)
449 {
450 	bh->b_end_io = end_buffer_async_read_io;
451 	set_buffer_async_read(bh);
452 }
453 
454 static void mark_buffer_async_write_endio(struct buffer_head *bh,
455 					  bh_end_io_t *handler)
456 {
457 	bh->b_end_io = handler;
458 	set_buffer_async_write(bh);
459 }
460 
461 void mark_buffer_async_write(struct buffer_head *bh)
462 {
463 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
464 }
465 EXPORT_SYMBOL(mark_buffer_async_write);
466 
467 
468 /*
469  * fs/buffer.c contains helper functions for buffer-backed address space's
470  * fsync functions.  A common requirement for buffer-based filesystems is
471  * that certain data from the backing blockdev needs to be written out for
472  * a successful fsync().  For example, ext2 indirect blocks need to be
473  * written back and waited upon before fsync() returns.
474  *
475  * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
476  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
477  * management of a list of dependent buffers at ->i_mapping->i_private_list.
478  *
479  * Locking is a little subtle: try_to_free_buffers() will remove buffers
480  * from their controlling inode's queue when they are being freed.  But
481  * try_to_free_buffers() will be operating against the *blockdev* mapping
482  * at the time, not against the S_ISREG file which depends on those buffers.
483  * So the locking for i_private_list is via the i_private_lock in the address_space
484  * which backs the buffers, which is different from the address_space
485  * against which the buffers are listed.  So for a particular address_space,
486  * mapping->i_private_lock does *not* protect mapping->i_private_list!  In fact,
487  * mapping->i_private_list will always be protected by the backing blockdev's
488  * ->i_private_lock.
489  *
490  * Which introduces a requirement: all buffers on an address_space's
491  * ->i_private_list must be from the same address_space: the blockdev's.
492  *
493  * address_spaces which do not place buffers at ->i_private_list via these
494  * utility functions are free to use i_private_lock and i_private_list for
495  * whatever they want.  The only requirement is that list_empty(i_private_list)
496  * be true at clear_inode() time.
497  *
498  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
499  * filesystems should do that.  invalidate_inode_buffers() should just go
500  * BUG_ON(!list_empty).
501  *
502  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
503  * take an address_space, not an inode.  And it should be called
504  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
505  * queued up.
506  *
507  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
508  * list if it is already on a list.  Because if the buffer is on a list,
509  * it *must* already be on the right one.  If not, the filesystem is being
510  * silly.  This will save a ton of locking.  But first we have to ensure
511  * that buffers are taken *off* the old inode's list when they are freed
512  * (presumably in truncate).  That requires careful auditing of all
513  * filesystems (do it inside bforget()).  It could also be done by bringing
514  * b_inode back.
515  */
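
/*
 * Illustrative sketch (hypothetical "examplefs", not kernel code): an
 * ext2-like filesystem queues a freshly modified indirect block against the
 * regular file's mapping, and writes those buffers back at fsync time:
 *
 *	after modifying the indirect block 'bh' on behalf of 'inode':
 *		mark_buffer_dirty_inode(bh, inode);
 *
 *	later, in the filesystem's ->fsync():
 *		err = sync_mapping_buffers(inode->i_mapping);
 *
 * The buffer itself lives in the blockdev's mapping; only its membership on
 * inode->i_mapping->i_private_list is tracked here, under the blockdev
 * mapping's i_private_lock as described above.
 */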
516 
517 /*
518  * The buffer's backing address_space's i_private_lock must be held
519  */
520 static void __remove_assoc_queue(struct buffer_head *bh)
521 {
522 	list_del_init(&bh->b_assoc_buffers);
523 	WARN_ON(!bh->b_assoc_map);
524 	bh->b_assoc_map = NULL;
525 }
526 
527 int inode_has_buffers(struct inode *inode)
528 {
529 	return !list_empty(&inode->i_data.i_private_list);
530 }
531 
532 /*
533  * osync is designed to support O_SYNC I/O.  It waits synchronously for
534  * all already-submitted IO to complete, but does not queue any new
535  * writes to the disk.
536  *
537  * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
538  * as you dirty the buffers, and then use osync_buffers_list() to wait for
539  * completion.  Any other dirty buffers which are not yet queued for
540  * write will not be flushed to disk by the osync.
541  */
542 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
543 {
544 	struct buffer_head *bh;
545 	struct list_head *p;
546 	int err = 0;
547 
548 	spin_lock(lock);
549 repeat:
550 	list_for_each_prev(p, list) {
551 		bh = BH_ENTRY(p);
552 		if (buffer_locked(bh)) {
553 			get_bh(bh);
554 			spin_unlock(lock);
555 			wait_on_buffer(bh);
556 			if (!buffer_uptodate(bh))
557 				err = -EIO;
558 			brelse(bh);
559 			spin_lock(lock);
560 			goto repeat;
561 		}
562 	}
563 	spin_unlock(lock);
564 	return err;
565 }
566 
567 /**
568  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
569  * @mapping: the mapping which wants those buffers written
570  *
571  * Starts I/O against the buffers at mapping->i_private_list, and waits upon
572  * that I/O.
573  *
574  * Basically, this is a convenience function for fsync().
575  * @mapping is a file or directory which needs those buffers to be written for
576  * a successful fsync().
577  */
578 int sync_mapping_buffers(struct address_space *mapping)
579 {
580 	struct address_space *buffer_mapping = mapping->i_private_data;
581 
582 	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
583 		return 0;
584 
585 	return fsync_buffers_list(&buffer_mapping->i_private_lock,
586 					&mapping->i_private_list);
587 }
588 EXPORT_SYMBOL(sync_mapping_buffers);
589 
590 /**
591  * generic_buffers_fsync_noflush - generic buffer fsync implementation
592  * for simple filesystems with no inode lock
593  *
594  * @file:	file to synchronize
595  * @start:	start offset in bytes
596  * @end:	end offset in bytes (inclusive)
597  * @datasync:	only synchronize essential metadata if true
598  *
599  * This is a generic implementation of the fsync method for simple
600  * filesystems which track all non-inode metadata in the buffers list
601  * hanging off the address_space structure.
602  */
603 int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
604 				  bool datasync)
605 {
606 	struct inode *inode = file->f_mapping->host;
607 	int err;
608 	int ret;
609 
610 	err = file_write_and_wait_range(file, start, end);
611 	if (err)
612 		return err;
613 
614 	ret = sync_mapping_buffers(inode->i_mapping);
615 	if (!(inode_state_read_once(inode) & I_DIRTY_ALL))
616 		goto out;
617 	if (datasync && !(inode_state_read_once(inode) & I_DIRTY_DATASYNC))
618 		goto out;
619 
620 	err = sync_inode_metadata(inode, 1);
621 	if (ret == 0)
622 		ret = err;
623 
624 out:
625 	/* check and advance again to catch errors after syncing out buffers */
626 	err = file_check_and_advance_wb_err(file);
627 	if (ret == 0)
628 		ret = err;
629 	return ret;
630 }
631 EXPORT_SYMBOL(generic_buffers_fsync_noflush);
632 
633 /**
634  * generic_buffers_fsync - generic buffer fsync implementation
635  * for simple filesystems with no inode lock
636  *
637  * @file:	file to synchronize
638  * @start:	start offset in bytes
639  * @end:	end offset in bytes (inclusive)
640  * @datasync:	only synchronize essential metadata if true
641  *
642  * This is a generic implementation of the fsync method for simple
643  * filesystems which track all non-inode metadata in the buffers list
644  * hanging off the address_space structure. This also makes sure that
645  * a device cache flush operation is called at the end.
646  */
647 int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
648 			  bool datasync)
649 {
650 	struct inode *inode = file->f_mapping->host;
651 	int ret;
652 
653 	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
654 	if (!ret)
655 		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
656 	return ret;
657 }
658 EXPORT_SYMBOL(generic_buffers_fsync);
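
/*
 * Illustrative sketch (hypothetical "examplefs", not kernel code): a simple
 * buffer_head based filesystem can point its ->fsync straight at the helper
 * above:
 *
 *	static int examplefs_fsync(struct file *file, loff_t start,
 *				   loff_t end, int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 * Filesystems which issue the device cache flush themselves (for example
 * from a journal commit) would use generic_buffers_fsync_noflush() instead.
 */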
659 
660 /*
661  * Called when we've recently written block `bblock', and it is known that
662  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
663  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
664  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
665  */
666 void write_boundary_block(struct block_device *bdev,
667 			sector_t bblock, unsigned blocksize)
668 {
669 	struct buffer_head *bh;
670 
671 	bh = __find_get_block_nonatomic(bdev, bblock + 1, blocksize);
672 	if (bh) {
673 		if (buffer_dirty(bh))
674 			write_dirty_buffer(bh, 0);
675 		put_bh(bh);
676 	}
677 }
678 
679 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
680 {
681 	struct address_space *mapping = inode->i_mapping;
682 	struct address_space *buffer_mapping = bh->b_folio->mapping;
683 
684 	mark_buffer_dirty(bh);
685 	if (!mapping->i_private_data) {
686 		mapping->i_private_data = buffer_mapping;
687 	} else {
688 		BUG_ON(mapping->i_private_data != buffer_mapping);
689 	}
690 	if (!bh->b_assoc_map) {
691 		spin_lock(&buffer_mapping->i_private_lock);
692 		list_move_tail(&bh->b_assoc_buffers,
693 				&mapping->i_private_list);
694 		bh->b_assoc_map = mapping;
695 		spin_unlock(&buffer_mapping->i_private_lock);
696 	}
697 }
698 EXPORT_SYMBOL(mark_buffer_dirty_inode);
699 
700 /**
701  * block_dirty_folio - Mark a folio as dirty.
702  * @mapping: The address space containing this folio.
703  * @folio: The folio to mark dirty.
704  *
705  * Filesystems which use buffer_heads can use this function as their
706  * ->dirty_folio implementation.  Some filesystems need to do a little
707  * work before calling this function.  Filesystems which do not use
708  * buffer_heads should call filemap_dirty_folio() instead.
709  *
710  * If the folio has buffers, the uptodate buffers are set dirty, to
711  * preserve dirty-state coherency between the folio and the buffers.
712  * Buffers added to a dirty folio are created dirty.
713  *
714  * The buffers are dirtied before the folio is dirtied.  There's a small
715  * race window in which writeback may see the folio cleanness but not the
716  * buffer dirtiness.  That's fine.  If this code were to set the folio
717  * dirty before the buffers, writeback could clear the folio dirty flag,
718  * see a bunch of clean buffers and we'd end up with dirty buffers/clean
719  * folio on the dirty folio list.
720  *
721  * We use i_private_lock to lock against try_to_free_buffers() while
722  * using the folio's buffer list.  This also prevents clean buffers
723  * being added to the folio after it was set dirty.
724  *
725  * Context: May only be called from process context.  Does not sleep.
726  * Caller must ensure that @folio cannot be truncated during this call,
727  * typically by holding the folio lock or having a page in the folio
728  * mapped and holding the page table lock.
729  *
730  * Return: True if the folio was dirtied; false if it was already dirtied.
731  */
732 bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
733 {
734 	struct buffer_head *head;
735 	bool newly_dirty;
736 
737 	spin_lock(&mapping->i_private_lock);
738 	head = folio_buffers(folio);
739 	if (head) {
740 		struct buffer_head *bh = head;
741 
742 		do {
743 			set_buffer_dirty(bh);
744 			bh = bh->b_this_page;
745 		} while (bh != head);
746 	}
747 	/*
748 	 * Lock out page's memcg migration to keep PageDirty
749 	 * synchronized with per-memcg dirty page counters.
750 	 */
751 	newly_dirty = !folio_test_set_dirty(folio);
752 	spin_unlock(&mapping->i_private_lock);
753 
754 	if (newly_dirty)
755 		__folio_mark_dirty(folio, mapping, 1);
756 
757 	if (newly_dirty)
758 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
759 
760 	return newly_dirty;
761 }
762 EXPORT_SYMBOL(block_dirty_folio);
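
/*
 * Illustrative sketch (hypothetical "examplefs", not kernel code): a
 * buffer_head based filesystem normally wires this straight into its
 * address_space_operations together with the other helpers in this file:
 *
 *	static const struct address_space_operations examplefs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		...
 *	};
 *
 * with the read and write paths built on block_read_full_folio() and
 * block_write_full_folio() via the filesystem's get_block() callback.
 */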
763 
764 /*
765  * Write out and wait upon a list of buffers.
766  *
767  * We have conflicting pressures: we want to make sure that all
768  * initially dirty buffers get waited on, but that any subsequently
769  * dirtied buffers don't.  After all, we don't want fsync to last
770  * forever if somebody is actively writing to the file.
771  *
772  * Do this in two main stages: first we copy dirty buffers to a
773  * temporary inode list, queueing the writes as we go.  Then we clean
774  * up, waiting for those writes to complete.
775  *
776  * During this second stage, any subsequent updates to the file may end
777  * up refiling the buffer on the original inode's dirty list again, so
778  * there is a chance we will end up with a buffer queued for write but
779  * not yet completed on that list.  So, as a final cleanup we go through
780  * the osync code to catch these locked, dirty buffers without requeuing
781  * any newly dirty buffers for write.
782  */
783 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
784 {
785 	struct buffer_head *bh;
786 	struct address_space *mapping;
787 	int err = 0, err2;
788 	struct blk_plug plug;
789 	LIST_HEAD(tmp);
790 
791 	blk_start_plug(&plug);
792 
793 	spin_lock(lock);
794 	while (!list_empty(list)) {
795 		bh = BH_ENTRY(list->next);
796 		mapping = bh->b_assoc_map;
797 		__remove_assoc_queue(bh);
798 		/* Avoid race with mark_buffer_dirty_inode() which does
799 		 * a lockless check and we rely on seeing the dirty bit */
800 		smp_mb();
801 		if (buffer_dirty(bh) || buffer_locked(bh)) {
802 			list_add(&bh->b_assoc_buffers, &tmp);
803 			bh->b_assoc_map = mapping;
804 			if (buffer_dirty(bh)) {
805 				get_bh(bh);
806 				spin_unlock(lock);
807 				/*
808 				 * Ensure any pending I/O completes so that
809 				 * write_dirty_buffer() actually writes the
810 				 * current contents - it is a noop if I/O is
811 				 * still in flight on potentially older
812 				 * contents.
813 				 */
814 				write_dirty_buffer(bh, REQ_SYNC);
815 
816 				/*
817 				 * Kick off IO for the previous mapping. Note
818 				 * that we will not run the very last mapping,
819 				 * wait_on_buffer() will do that for us
820 				 * through sync_buffer().
821 				 */
822 				brelse(bh);
823 				spin_lock(lock);
824 			}
825 		}
826 	}
827 
828 	spin_unlock(lock);
829 	blk_finish_plug(&plug);
830 	spin_lock(lock);
831 
832 	while (!list_empty(&tmp)) {
833 		bh = BH_ENTRY(tmp.prev);
834 		get_bh(bh);
835 		mapping = bh->b_assoc_map;
836 		__remove_assoc_queue(bh);
837 		/* Avoid race with mark_buffer_dirty_inode() which does
838 		 * a lockless check and we rely on seeing the dirty bit */
839 		smp_mb();
840 		if (buffer_dirty(bh)) {
841 			list_add(&bh->b_assoc_buffers,
842 				 &mapping->i_private_list);
843 			bh->b_assoc_map = mapping;
844 		}
845 		spin_unlock(lock);
846 		wait_on_buffer(bh);
847 		if (!buffer_uptodate(bh))
848 			err = -EIO;
849 		brelse(bh);
850 		spin_lock(lock);
851 	}
852 
853 	spin_unlock(lock);
854 	err2 = osync_buffers_list(lock, list);
855 	if (err)
856 		return err;
857 	else
858 		return err2;
859 }
860 
861 /*
862  * Invalidate any and all dirty buffers on a given inode.  We are
863  * probably unmounting the fs, but that doesn't mean we have already
864  * done a sync().  Just drop the buffers from the inode list.
865  *
866  * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
867  * assumes that all the buffers are against the blockdev.
868  */
869 void invalidate_inode_buffers(struct inode *inode)
870 {
871 	if (inode_has_buffers(inode)) {
872 		struct address_space *mapping = &inode->i_data;
873 		struct list_head *list = &mapping->i_private_list;
874 		struct address_space *buffer_mapping = mapping->i_private_data;
875 
876 		spin_lock(&buffer_mapping->i_private_lock);
877 		while (!list_empty(list))
878 			__remove_assoc_queue(BH_ENTRY(list->next));
879 		spin_unlock(&buffer_mapping->i_private_lock);
880 	}
881 }
882 EXPORT_SYMBOL(invalidate_inode_buffers);
883 
884 /*
885  * Remove any clean buffers from the inode's buffer list.  This is called
886  * when we're trying to free the inode itself.  Those buffers can pin it.
887  *
888  * Returns true if all buffers were removed.
889  */
890 int remove_inode_buffers(struct inode *inode)
891 {
892 	int ret = 1;
893 
894 	if (inode_has_buffers(inode)) {
895 		struct address_space *mapping = &inode->i_data;
896 		struct list_head *list = &mapping->i_private_list;
897 		struct address_space *buffer_mapping = mapping->i_private_data;
898 
899 		spin_lock(&buffer_mapping->i_private_lock);
900 		while (!list_empty(list)) {
901 			struct buffer_head *bh = BH_ENTRY(list->next);
902 			if (buffer_dirty(bh)) {
903 				ret = 0;
904 				break;
905 			}
906 			__remove_assoc_queue(bh);
907 		}
908 		spin_unlock(&buffer_mapping->i_private_lock);
909 	}
910 	return ret;
911 }
912 
913 /*
914  * Create the appropriate buffers when given a folio for data area and
915  * the size of each buffer.. Use the bh->b_this_page linked list to
916  * follow the buffers created.  Return NULL if unable to create more
917  * buffers.
918  *
919  * The retry flag is used to differentiate async IO (paging, swapping)
920  * which may not fail from ordinary buffer allocations.
921  */
922 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
923 					gfp_t gfp)
924 {
925 	struct buffer_head *bh, *head;
926 	long offset;
927 	struct mem_cgroup *memcg, *old_memcg;
928 
929 	/* The folio lock pins the memcg */
930 	memcg = folio_memcg(folio);
931 	old_memcg = set_active_memcg(memcg);
932 
933 	head = NULL;
934 	offset = folio_size(folio);
935 	while ((offset -= size) >= 0) {
936 		bh = alloc_buffer_head(gfp);
937 		if (!bh)
938 			goto no_grow;
939 
940 		bh->b_this_page = head;
941 		bh->b_blocknr = -1;
942 		head = bh;
943 
944 		bh->b_size = size;
945 
946 		/* Link the buffer to its folio */
947 		folio_set_bh(bh, folio, offset);
948 	}
949 out:
950 	set_active_memcg(old_memcg);
951 	return head;
952 /*
953  * In case anything failed, we just free everything we got.
954  */
955 no_grow:
956 	if (head) {
957 		do {
958 			bh = head;
959 			head = head->b_this_page;
960 			free_buffer_head(bh);
961 		} while (head);
962 	}
963 
964 	goto out;
965 }
966 EXPORT_SYMBOL_GPL(folio_alloc_buffers);
967 
968 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
969 {
970 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
971 
972 	return folio_alloc_buffers(page_folio(page), size, gfp);
973 }
974 EXPORT_SYMBOL_GPL(alloc_page_buffers);
975 
976 static inline void link_dev_buffers(struct folio *folio,
977 		struct buffer_head *head)
978 {
979 	struct buffer_head *bh, *tail;
980 
981 	bh = head;
982 	do {
983 		tail = bh;
984 		bh = bh->b_this_page;
985 	} while (bh);
986 	tail->b_this_page = head;
987 	folio_attach_private(folio, head);
988 }
989 
990 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
991 {
992 	sector_t retval = ~((sector_t)0);
993 	loff_t sz = bdev_nr_bytes(bdev);
994 
995 	if (sz) {
996 		unsigned int sizebits = blksize_bits(size);
997 		retval = (sz >> sizebits);
998 	}
999 	return retval;
1000 }
1001 
1002 /*
1003  * Initialise the state of a blockdev folio's buffers.
1004  */
1005 static sector_t folio_init_buffers(struct folio *folio,
1006 		struct block_device *bdev, unsigned size)
1007 {
1008 	struct buffer_head *head = folio_buffers(folio);
1009 	struct buffer_head *bh = head;
1010 	bool uptodate = folio_test_uptodate(folio);
1011 	sector_t block = div_u64(folio_pos(folio), size);
1012 	sector_t end_block = blkdev_max_block(bdev, size);
1013 
1014 	do {
1015 		if (!buffer_mapped(bh)) {
1016 			bh->b_end_io = NULL;
1017 			bh->b_private = NULL;
1018 			bh->b_bdev = bdev;
1019 			bh->b_blocknr = block;
1020 			if (uptodate)
1021 				set_buffer_uptodate(bh);
1022 			if (block < end_block)
1023 				set_buffer_mapped(bh);
1024 		}
1025 		block++;
1026 		bh = bh->b_this_page;
1027 	} while (bh != head);
1028 
1029 	/*
1030 	 * Caller needs to validate requested block against end of device.
1031 	 */
1032 	return end_block;
1033 }
1034 
1035 /*
1036  * Create the page-cache folio that contains the requested block.
1037  *
1038  * This is used purely for blockdev mappings.
1039  *
1040  * Returns false if we have a failure which cannot be cured by retrying
1041  * without sleeping.  Returns true if we succeeded, or the caller should retry.
1042  */
1043 static bool grow_dev_folio(struct block_device *bdev, sector_t block,
1044 		pgoff_t index, unsigned size, gfp_t gfp)
1045 {
1046 	struct address_space *mapping = bdev->bd_mapping;
1047 	struct folio *folio;
1048 	struct buffer_head *bh;
1049 	sector_t end_block = 0;
1050 
1051 	folio = __filemap_get_folio(mapping, index,
1052 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
1053 	if (IS_ERR(folio))
1054 		return false;
1055 
1056 	bh = folio_buffers(folio);
1057 	if (bh) {
1058 		if (bh->b_size == size) {
1059 			end_block = folio_init_buffers(folio, bdev, size);
1060 			goto unlock;
1061 		}
1062 
1063 		/*
1064 		 * Retrying may succeed; for example the folio may finish
1065 		 * writeback, or buffers may be cleaned.  This should not
1066 		 * happen very often; maybe we have old buffers attached to
1067 		 * this blockdev's page cache and we're trying to change
1068 		 * the block size?
1069 		 */
1070 		if (!try_to_free_buffers(folio)) {
1071 			end_block = ~0ULL;
1072 			goto unlock;
1073 		}
1074 	}
1075 
1076 	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
1077 	if (!bh)
1078 		goto unlock;
1079 
1080 	/*
1081 	 * Link the folio to the buffers and initialise them.  Take the
1082 	 * lock to be atomic wrt __find_get_block(), which does not
1083 	 * run under the folio lock.
1084 	 */
1085 	spin_lock(&mapping->i_private_lock);
1086 	link_dev_buffers(folio, bh);
1087 	end_block = folio_init_buffers(folio, bdev, size);
1088 	spin_unlock(&mapping->i_private_lock);
1089 unlock:
1090 	folio_unlock(folio);
1091 	folio_put(folio);
1092 	return block < end_block;
1093 }
1094 
1095 /*
1096  * Create buffers for the specified block device block's folio.  If
1097  * that folio was dirty, the buffers are set dirty also.  Returns false
1098  * if we've hit a permanent error.
1099  */
1100 static bool grow_buffers(struct block_device *bdev, sector_t block,
1101 		unsigned size, gfp_t gfp)
1102 {
1103 	loff_t pos;
1104 
1105 	/*
1106 	 * Check for a block which lies outside our maximum possible
1107 	 * pagecache index.
1108 	 */
1109 	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
1110 		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
1111 			__func__, (unsigned long long)block,
1112 			bdev);
1113 		return false;
1114 	}
1115 
1116 	/* Create a folio with the proper size buffers */
1117 	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
1118 }
1119 
1120 static struct buffer_head *
1121 __getblk_slow(struct block_device *bdev, sector_t block,
1122 	     unsigned size, gfp_t gfp)
1123 {
1124 	bool blocking = gfpflags_allow_blocking(gfp);
1125 
1126 	if (WARN_ON_ONCE(!IS_ALIGNED(size, bdev_logical_block_size(bdev)))) {
1127 		printk(KERN_ERR "getblk(): block size %d not aligned to logical block size %d\n",
1128 		       size, bdev_logical_block_size(bdev));
1129 		return NULL;
1130 	}
1131 
1132 	for (;;) {
1133 		struct buffer_head *bh;
1134 
1135 		if (!grow_buffers(bdev, block, size, gfp))
1136 			return NULL;
1137 
1138 		if (blocking)
1139 			bh = __find_get_block_nonatomic(bdev, block, size);
1140 		else
1141 			bh = __find_get_block(bdev, block, size);
1142 		if (bh)
1143 			return bh;
1144 	}
1145 }
1146 
1147 /*
1148  * The relationship between dirty buffers and dirty pages:
1149  *
1150  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1151  * the page is tagged dirty in the page cache.
1152  *
1153  * At all times, the dirtiness of the buffers represents the dirtiness of
1154  * subsections of the page.  If the page has buffers, the page dirty bit is
1155  * merely a hint about the true dirty state.
1156  *
1157  * When a page is set dirty in its entirety, all its buffers are marked dirty
1158  * (if the page has buffers).
1159  *
1160  * When a buffer is marked dirty, its page is dirtied, but the page's other
1161  * buffers are not.
1162  *
1163  * Also.  When blockdev buffers are explicitly read with bread(), they
1164  * individually become uptodate.  But their backing page remains not
1165  * uptodate - even if all of its buffers are uptodate.  A subsequent
1166  * block_read_full_folio() against that folio will discover all the uptodate
1167  * buffers, will set the folio uptodate and will perform no I/O.
1168  */
1169 
1170 /**
1171  * mark_buffer_dirty - mark a buffer_head as needing writeout
1172  * @bh: the buffer_head to mark dirty
1173  *
1174  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1175  * its backing page dirty, then tag the page as dirty in the page cache
1176  * and then attach the address_space's inode to its superblock's dirty
1177  * inode list.
1178  *
1179  * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
1180  * i_pages lock and mapping->host->i_lock.
1181  */
1182 void mark_buffer_dirty(struct buffer_head *bh)
1183 {
1184 	WARN_ON_ONCE(!buffer_uptodate(bh));
1185 
1186 	trace_block_dirty_buffer(bh);
1187 
1188 	/*
1189 	 * Very *carefully* optimize the it-is-already-dirty case.
1190 	 *
1191 	 * Don't let the final "is it dirty" escape to before we
1192 	 * perhaps modified the buffer.
1193 	 */
1194 	if (buffer_dirty(bh)) {
1195 		smp_mb();
1196 		if (buffer_dirty(bh))
1197 			return;
1198 	}
1199 
1200 	if (!test_set_buffer_dirty(bh)) {
1201 		struct folio *folio = bh->b_folio;
1202 		struct address_space *mapping = NULL;
1203 
1204 		if (!folio_test_set_dirty(folio)) {
1205 			mapping = folio->mapping;
1206 			if (mapping)
1207 				__folio_mark_dirty(folio, mapping, 0);
1208 		}
1209 		if (mapping)
1210 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1211 	}
1212 }
1213 EXPORT_SYMBOL(mark_buffer_dirty);
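
/*
 * Illustrative sketch (not part of the kernel source): the common
 * read-modify-dirty pattern for filesystem metadata is:
 *
 *	bh = sb_bread(sb, block);
 *	if (!bh)
 *		return -EIO;
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * The actual write-out happens later, from writeback or sync, unless the
 * caller forces it with sync_dirty_buffer().
 */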
1214 
1215 void mark_buffer_write_io_error(struct buffer_head *bh)
1216 {
1217 	set_buffer_write_io_error(bh);
1218 	/* FIXME: do we need to set this in both places? */
1219 	if (bh->b_folio && bh->b_folio->mapping)
1220 		mapping_set_error(bh->b_folio->mapping, -EIO);
1221 	if (bh->b_assoc_map)
1222 		mapping_set_error(bh->b_assoc_map, -EIO);
1223 }
1224 EXPORT_SYMBOL(mark_buffer_write_io_error);
1225 
1226 /**
1227  * __brelse - Release a buffer.
1228  * @bh: The buffer to release.
1229  *
1230  * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
1231  */
1232 void __brelse(struct buffer_head *bh)
1233 {
1234 	if (atomic_read(&bh->b_count)) {
1235 		put_bh(bh);
1236 		return;
1237 	}
1238 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1239 }
1240 EXPORT_SYMBOL(__brelse);
1241 
1242 /**
1243  * __bforget - Discard any dirty data in a buffer.
1244  * @bh: The buffer to forget.
1245  *
1246  * This variant of bforget() can be called if @bh is guaranteed to not
1247  * be NULL.
1248  */
1249 void __bforget(struct buffer_head *bh)
1250 {
1251 	clear_buffer_dirty(bh);
1252 	if (bh->b_assoc_map) {
1253 		struct address_space *buffer_mapping = bh->b_folio->mapping;
1254 
1255 		spin_lock(&buffer_mapping->i_private_lock);
1256 		list_del_init(&bh->b_assoc_buffers);
1257 		bh->b_assoc_map = NULL;
1258 		spin_unlock(&buffer_mapping->i_private_lock);
1259 	}
1260 	__brelse(bh);
1261 }
1262 EXPORT_SYMBOL(__bforget);
1263 
1264 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1265 {
1266 	lock_buffer(bh);
1267 	if (buffer_uptodate(bh)) {
1268 		unlock_buffer(bh);
1269 		return bh;
1270 	} else {
1271 		get_bh(bh);
1272 		bh->b_end_io = end_buffer_read_sync;
1273 		submit_bh(REQ_OP_READ, bh);
1274 		wait_on_buffer(bh);
1275 		if (buffer_uptodate(bh))
1276 			return bh;
1277 	}
1278 	brelse(bh);
1279 	return NULL;
1280 }
1281 
1282 /*
1283  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1284  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1285  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1286  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1287  * CPU's LRUs at the same time.
1288  *
1289  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1290  * sb_find_get_block().
1291  *
1292  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1293  * a local interrupt disable for that.
1294  */
1295 
1296 #define BH_LRU_SIZE	16
1297 
1298 struct bh_lru {
1299 	struct buffer_head *bhs[BH_LRU_SIZE];
1300 };
1301 
1302 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1303 
1304 #ifdef CONFIG_SMP
1305 #define bh_lru_lock()	local_irq_disable()
1306 #define bh_lru_unlock()	local_irq_enable()
1307 #else
1308 #define bh_lru_lock()	preempt_disable()
1309 #define bh_lru_unlock()	preempt_enable()
1310 #endif
1311 
1312 static inline void check_irqs_on(void)
1313 {
1314 #ifdef irqs_disabled
1315 	BUG_ON(irqs_disabled());
1316 #endif
1317 }
1318 
1319 /*
1320  * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1321  * inserted at the front, and the buffer_head at the back, if any, is evicted.
1322  * If it is already in the LRU, it is moved to the front.
1323  */
1324 static void bh_lru_install(struct buffer_head *bh)
1325 {
1326 	struct buffer_head *evictee = bh;
1327 	struct bh_lru *b;
1328 	int i;
1329 
1330 	check_irqs_on();
1331 	bh_lru_lock();
1332 
1333 	/*
1334 	 * The refcount of a buffer_head in the bh_lru prevents dropping the
1335 	 * attached page (i.e., try_to_free_buffers) and so can make page
1336 	 * migration fail.
1337 	 * Skip putting the upcoming bh into the bh_lru until migration is done.
1338 	 */
1339 	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1340 		bh_lru_unlock();
1341 		return;
1342 	}
1343 
1344 	b = this_cpu_ptr(&bh_lrus);
1345 	for (i = 0; i < BH_LRU_SIZE; i++) {
1346 		swap(evictee, b->bhs[i]);
1347 		if (evictee == bh) {
1348 			bh_lru_unlock();
1349 			return;
1350 		}
1351 	}
1352 
1353 	get_bh(bh);
1354 	bh_lru_unlock();
1355 	brelse(evictee);
1356 }
1357 
1358 /*
1359  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1360  */
1361 static struct buffer_head *
1362 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1363 {
1364 	struct buffer_head *ret = NULL;
1365 	unsigned int i;
1366 
1367 	check_irqs_on();
1368 	bh_lru_lock();
1369 	if (cpu_is_isolated(smp_processor_id())) {
1370 		bh_lru_unlock();
1371 		return NULL;
1372 	}
1373 	for (i = 0; i < BH_LRU_SIZE; i++) {
1374 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1375 
1376 		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1377 		    bh->b_size == size) {
1378 			if (i) {
1379 				while (i) {
1380 					__this_cpu_write(bh_lrus.bhs[i],
1381 						__this_cpu_read(bh_lrus.bhs[i - 1]));
1382 					i--;
1383 				}
1384 				__this_cpu_write(bh_lrus.bhs[0], bh);
1385 			}
1386 			get_bh(bh);
1387 			ret = bh;
1388 			break;
1389 		}
1390 	}
1391 	bh_lru_unlock();
1392 	return ret;
1393 }
1394 
1395 /*
1396  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1397  * it in the LRU and mark it as accessed.  If it is not present then return
1398  * NULL.  For atomic-context callers, NULL may also be returned if the buffer
1399  * is being migrated; in that case the folio is not marked accessed either.
1400  */
1401 static struct buffer_head *
1402 find_get_block_common(struct block_device *bdev, sector_t block,
1403 			unsigned size, bool atomic)
1404 {
1405 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1406 
1407 	if (bh == NULL) {
1408 		/* __find_get_block_slow will mark the page accessed */
1409 		bh = __find_get_block_slow(bdev, block, atomic);
1410 		if (bh)
1411 			bh_lru_install(bh);
1412 	} else
1413 		touch_buffer(bh);
1414 
1415 	return bh;
1416 }
1417 
1418 struct buffer_head *
1419 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1420 {
1421 	return find_get_block_common(bdev, block, size, true);
1422 }
1423 EXPORT_SYMBOL(__find_get_block);
1424 
1425 /* same as __find_get_block() but allows sleeping contexts */
1426 struct buffer_head *
1427 __find_get_block_nonatomic(struct block_device *bdev, sector_t block,
1428 			   unsigned size)
1429 {
1430 	return find_get_block_common(bdev, block, size, false);
1431 }
1432 EXPORT_SYMBOL(__find_get_block_nonatomic);
1433 
1434 /**
1435  * bdev_getblk - Get a buffer_head in a block device's buffer cache.
1436  * @bdev: The block device.
1437  * @block: The block number.
1438  * @size: The size of buffer_heads for this @bdev.
1439  * @gfp: The memory allocation flags to use.
1440  *
1441  * The returned buffer head has its reference count incremented, but is
1442  * not locked.  The caller should call brelse() when it has finished
1443  * with the buffer.  The buffer may not be uptodate.  If needed, the
1444  * caller can bring it uptodate either by reading it or overwriting it.
1445  *
1446  * Return: The buffer head, or NULL if memory could not be allocated.
1447  */
1448 struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
1449 		unsigned size, gfp_t gfp)
1450 {
1451 	struct buffer_head *bh;
1452 
1453 	if (gfpflags_allow_blocking(gfp))
1454 		bh = __find_get_block_nonatomic(bdev, block, size);
1455 	else
1456 		bh = __find_get_block(bdev, block, size);
1457 
1458 	might_alloc(gfp);
1459 	if (bh)
1460 		return bh;
1461 
1462 	return __getblk_slow(bdev, block, size, gfp);
1463 }
1464 EXPORT_SYMBOL(bdev_getblk);
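
/*
 * Illustrative sketch (not part of the kernel source): a caller which will
 * overwrite the whole block never needs to read it, and can bring the
 * buffer uptodate itself:
 *
 *	bh = bdev_getblk(bdev, block, size, GFP_NOFS | __GFP_MOVABLE);
 *	if (!bh)
 *		return -ENOMEM;
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	mark_buffer_dirty(bh);
 *	unlock_buffer(bh);
 *	brelse(bh);
 */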
1465 
1466 /*
1467  * Do async read-ahead on a buffer.
1468  */
1469 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1470 {
1471 	struct buffer_head *bh = bdev_getblk(bdev, block, size,
1472 			GFP_NOWAIT | __GFP_MOVABLE);
1473 
1474 	if (likely(bh)) {
1475 		bh_readahead(bh, REQ_RAHEAD);
1476 		brelse(bh);
1477 	}
1478 }
1479 EXPORT_SYMBOL(__breadahead);
1480 
1481 /**
1482  * __bread_gfp() - Read a block.
1483  * @bdev: The block device to read from.
1484  * @block: Block number in units of block size.
1485  * @size: The block size of this device in bytes.
1486  * @gfp: Not page allocation flags; see below.
1487  *
1488  * You are not expected to call this function.  You should use one of
1489  * sb_bread(), sb_bread_unmovable() or __bread().
1490  *
1491  * Read a specified block, and return the buffer head that refers to it.
1492  * If @gfp is 0, the memory will be allocated using the block device's
1493  * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
1494  * allocated from a movable area.  Do not pass in a complete set of
1495  * GFP flags.
1496  *
1497  * The returned buffer head has its refcount increased.  The caller should
1498  * call brelse() when it has finished with the buffer.
1499  *
1500  * Context: May sleep waiting for I/O.
1501  * Return: NULL if the block was unreadable.
1502  */
1503 struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
1504 		unsigned size, gfp_t gfp)
1505 {
1506 	struct buffer_head *bh;
1507 
1508 	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
1509 
1510 	/*
1511 	 * Prefer looping in the allocator rather than here, at least that
1512 	 * code knows what it's doing.
1513 	 */
1514 	gfp |= __GFP_NOFAIL;
1515 
1516 	bh = bdev_getblk(bdev, block, size, gfp);
1517 
1518 	if (likely(bh) && !buffer_uptodate(bh))
1519 		bh = __bread_slow(bh);
1520 	return bh;
1521 }
1522 EXPORT_SYMBOL(__bread_gfp);
1523 
1524 static void __invalidate_bh_lrus(struct bh_lru *b)
1525 {
1526 	int i;
1527 
1528 	for (i = 0; i < BH_LRU_SIZE; i++) {
1529 		brelse(b->bhs[i]);
1530 		b->bhs[i] = NULL;
1531 	}
1532 }
1533 /*
1534  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1535  * This doesn't race because it runs on each cpu either in irq context
1536  * or with preempt disabled.
1537  */
1538 static void invalidate_bh_lru(void *arg)
1539 {
1540 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1541 
1542 	__invalidate_bh_lrus(b);
1543 	put_cpu_var(bh_lrus);
1544 }
1545 
1546 bool has_bh_in_lru(int cpu, void *dummy)
1547 {
1548 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1549 	int i;
1550 
1551 	for (i = 0; i < BH_LRU_SIZE; i++) {
1552 		if (b->bhs[i])
1553 			return true;
1554 	}
1555 
1556 	return false;
1557 }
1558 
1559 void invalidate_bh_lrus(void)
1560 {
1561 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1562 }
1563 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1564 
1565 /*
1566  * It's called from workqueue context so we need a bh_lru_lock to close
1567  * the race with preemption/irq.
1568  */
1569 void invalidate_bh_lrus_cpu(void)
1570 {
1571 	struct bh_lru *b;
1572 
1573 	bh_lru_lock();
1574 	b = this_cpu_ptr(&bh_lrus);
1575 	__invalidate_bh_lrus(b);
1576 	bh_lru_unlock();
1577 }
1578 
1579 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1580 		  unsigned long offset)
1581 {
1582 	bh->b_folio = folio;
1583 	BUG_ON(offset >= folio_size(folio));
1584 	if (folio_test_highmem(folio))
1585 		/*
1586 		 * This catches illegal uses and preserves the offset:
1587 		 */
1588 		bh->b_data = (char *)(0 + offset);
1589 	else
1590 		bh->b_data = folio_address(folio) + offset;
1591 }
1592 EXPORT_SYMBOL(folio_set_bh);
1593 
1594 /*
1595  * Called when truncating a buffer on a page completely.
1596  */
1597 
1598 /* Bits that are cleared during an invalidate */
1599 #define BUFFER_FLAGS_DISCARD \
1600 	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1601 	 1 << BH_Delay | 1 << BH_Unwritten)
1602 
1603 static void discard_buffer(struct buffer_head * bh)
1604 {
1605 	unsigned long b_state;
1606 
1607 	lock_buffer(bh);
1608 	clear_buffer_dirty(bh);
1609 	bh->b_bdev = NULL;
1610 	b_state = READ_ONCE(bh->b_state);
1611 	do {
1612 	} while (!try_cmpxchg_relaxed(&bh->b_state, &b_state,
1613 				      b_state & ~BUFFER_FLAGS_DISCARD));
1614 	unlock_buffer(bh);
1615 }
1616 
1617 /**
1618  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1619  * @folio: The folio which is affected.
1620  * @offset: start of the range to invalidate
1621  * @length: length of the range to invalidate
1622  *
1623  * block_invalidate_folio() is called when all or part of the folio has been
1624  * invalidated by a truncate operation.
1625  *
1626  * block_invalidate_folio() does not have to release all buffers, but it must
1627  * ensure that no dirty buffer is left outside @offset and that no I/O
1628  * is underway against any of the blocks which are outside the truncation
1629  * point.  Because the caller is about to free (and possibly reuse) those
1630  * blocks on-disk.
1631  */
1632 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1633 {
1634 	struct buffer_head *head, *bh, *next;
1635 	size_t curr_off = 0;
1636 	size_t stop = length + offset;
1637 
1638 	BUG_ON(!folio_test_locked(folio));
1639 
1640 	/*
1641 	 * Check for overflow
1642 	 */
1643 	BUG_ON(stop > folio_size(folio) || stop < length);
1644 
1645 	head = folio_buffers(folio);
1646 	if (!head)
1647 		return;
1648 
1649 	bh = head;
1650 	do {
1651 		size_t next_off = curr_off + bh->b_size;
1652 		next = bh->b_this_page;
1653 
1654 		/*
1655 		 * Are we still fully in range ?
1656 		 */
1657 		if (next_off > stop)
1658 			goto out;
1659 
1660 		/*
1661 		 * is this block fully invalidated?
1662 		 */
1663 		if (offset <= curr_off)
1664 			discard_buffer(bh);
1665 		curr_off = next_off;
1666 		bh = next;
1667 	} while (bh != head);
1668 
1669 	/*
1670 	 * We release buffers only if the entire folio is being invalidated.
1671 	 * The get_block cached value has been unconditionally invalidated,
1672 	 * so real IO is not possible anymore.
1673 	 */
1674 	if (length == folio_size(folio))
1675 		filemap_release_folio(folio, 0);
1676 out:
1677 	folio_clear_mappedtodisk(folio);
1678 }
1679 EXPORT_SYMBOL(block_invalidate_folio);
1680 
1681 /*
1682  * We attach and possibly dirty the buffers atomically wrt
1683  * block_dirty_folio() via i_private_lock.  try_to_free_buffers
1684  * is already excluded via the folio lock.
1685  */
1686 struct buffer_head *create_empty_buffers(struct folio *folio,
1687 		unsigned long blocksize, unsigned long b_state)
1688 {
1689 	struct buffer_head *bh, *head, *tail;
1690 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
1691 
1692 	head = folio_alloc_buffers(folio, blocksize, gfp);
1693 	bh = head;
1694 	do {
1695 		bh->b_state |= b_state;
1696 		tail = bh;
1697 		bh = bh->b_this_page;
1698 	} while (bh);
1699 	tail->b_this_page = head;
1700 
1701 	spin_lock(&folio->mapping->i_private_lock);
1702 	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1703 		bh = head;
1704 		do {
1705 			if (folio_test_dirty(folio))
1706 				set_buffer_dirty(bh);
1707 			if (folio_test_uptodate(folio))
1708 				set_buffer_uptodate(bh);
1709 			bh = bh->b_this_page;
1710 		} while (bh != head);
1711 	}
1712 	folio_attach_private(folio, head);
1713 	spin_unlock(&folio->mapping->i_private_lock);
1714 
1715 	return head;
1716 }
1717 EXPORT_SYMBOL(create_empty_buffers);
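
/*
 * Illustrative sketch (not part of the kernel source): callers typically
 * use this to make sure a locked folio has buffers before walking them:
 *
 *	head = folio_buffers(folio);
 *	if (!head)
 *		head = create_empty_buffers(folio,
 *				1 << inode->i_blkbits, 0);
 *
 * which is exactly what folio_create_buffers() below does.
 */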
1718 
1719 /**
1720  * clean_bdev_aliases: clean a range of buffers in block device
1721  * @bdev: Block device to clean buffers in
1722  * @block: Start of a range of blocks to clean
1723  * @len: Number of blocks to clean
1724  *
1725  * We are taking a range of blocks for data and we don't want writeback of any
1726  * buffer-cache aliases starting from the return from this function until the
1727  * moment when something explicitly marks the buffer dirty (hopefully that
1728  * will not happen until we free that block ;-) We don't even need to mark
1729  * it not-uptodate - nobody can expect anything from a newly allocated buffer
1730  * anyway. We used to use unmap_buffer() for such invalidation, but that was
1731  * wrong. We definitely don't want to mark the alias unmapped, for example - it
1732  * would confuse anyone who might pick it up with bread() afterwards...
1733  *
1734  * Also..  Note that bforget() doesn't lock the buffer.  So there can be
1735  * writeout I/O going on against recently-freed buffers.  We don't wait on that
1736  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1737  * need to.  That happens here.
1738  */
1739 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1740 {
1741 	struct address_space *bd_mapping = bdev->bd_mapping;
1742 	const int blkbits = bd_mapping->host->i_blkbits;
1743 	struct folio_batch fbatch;
1744 	pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
1745 	pgoff_t end;
1746 	int i, count;
1747 	struct buffer_head *bh;
1748 	struct buffer_head *head;
1749 
1750 	end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
1751 	folio_batch_init(&fbatch);
1752 	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1753 		count = folio_batch_count(&fbatch);
1754 		for (i = 0; i < count; i++) {
1755 			struct folio *folio = fbatch.folios[i];
1756 
1757 			if (!folio_buffers(folio))
1758 				continue;
1759 			/*
1760 			 * We use folio lock instead of bd_mapping->i_private_lock
1761 			 * to pin buffers here since we can afford to sleep and
1762 			 * it scales better than a global spinlock lock.
1763 			 * it scales better than a global spinlock.
1764 			folio_lock(folio);
1765 			/* Recheck when the folio is locked which pins bhs */
1766 			head = folio_buffers(folio);
1767 			if (!head)
1768 				goto unlock_page;
1769 			bh = head;
1770 			do {
1771 				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1772 					goto next;
1773 				if (bh->b_blocknr >= block + len)
1774 					break;
1775 				clear_buffer_dirty(bh);
1776 				wait_on_buffer(bh);
1777 				clear_buffer_req(bh);
1778 next:
1779 				bh = bh->b_this_page;
1780 			} while (bh != head);
1781 unlock_page:
1782 			folio_unlock(folio);
1783 		}
1784 		folio_batch_release(&fbatch);
1785 		cond_resched();
1786 		/* End of range already reached? */
1787 		if (index > end || !index)
1788 			break;
1789 	}
1790 }
1791 EXPORT_SYMBOL(clean_bdev_aliases);
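
/*
 * The common single-block case goes through the clean_bdev_bh_alias()
 * wrapper in buffer_head.h, which the write paths below use right after
 * get_block() reports a freshly allocated (buffer_new) block, roughly:
 *
 *	if (buffer_new(bh)) {
 *		clear_buffer_new(bh);
 *		clean_bdev_bh_alias(bh);
 *	}
 *
 * which drops any stale buffer-cache alias of that one block.
 */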
1792 
1793 static struct buffer_head *folio_create_buffers(struct folio *folio,
1794 						struct inode *inode,
1795 						unsigned int b_state)
1796 {
1797 	struct buffer_head *bh;
1798 
1799 	BUG_ON(!folio_test_locked(folio));
1800 
1801 	bh = folio_buffers(folio);
1802 	if (!bh)
1803 		bh = create_empty_buffers(folio,
1804 				1 << READ_ONCE(inode->i_blkbits), b_state);
1805 	return bh;
1806 }
1807 
1808 /*
1809  * NOTE! All mapped/uptodate combinations are valid:
1810  *
1811  *	Mapped	Uptodate	Meaning
1812  *
1813  *	No	No		"unknown" - must do get_block()
1814  *	No	Yes		"hole" - zero-filled
1815  *	Yes	No		"allocated" - allocated on disk, not read in
1816  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1817  *
1818  * "Dirty" is valid only with the last case (mapped+uptodate).
1819  */
1820 
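/*
 * The read and write paths below act on exactly this table: an "unknown"
 * buffer is resolved via get_block(), a "hole" is satisfied by zeroing
 * the folio range (see block_read_full_folio()), and an "allocated"
 * buffer that is about to be only partially overwritten is first brought
 * uptodate with bh_read_nowait() in __block_write_begin_int().
 */
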
1821 /*
1822  * While block_write_full_folio is writing back the dirty buffers under
1823  * the page lock, whoever dirtied the buffers may decide to clean them
1824  * again at any time.  We handle that by only looking at the buffer
1825  * state inside lock_buffer().
1826  *
1827  * If block_write_full_folio() is called for regular writeback
1828  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1829  * locked buffer.  This can only happen if someone has written the buffer
1830  * directly, with submit_bh().  At the address_space level PageWriteback
1831  * prevents this contention from occurring.
1832  *
1833  * If block_write_full_folio() is called with wbc->sync_mode ==
1834  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1835  * causes the writes to be flagged as synchronous writes.
1836  */
1837 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1838 			get_block_t *get_block, struct writeback_control *wbc)
1839 {
1840 	int err;
1841 	sector_t block;
1842 	sector_t last_block;
1843 	struct buffer_head *bh, *head;
1844 	size_t blocksize;
1845 	int nr_underway = 0;
1846 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
1847 
1848 	head = folio_create_buffers(folio, inode,
1849 				    (1 << BH_Dirty) | (1 << BH_Uptodate));
1850 
1851 	/*
1852 	 * Be very careful.  We have no exclusion from block_dirty_folio
1853 	 * here, and the (potentially unmapped) buffers may become dirty at
1854 	 * any time.  If a buffer becomes dirty here after we've inspected it
1855 	 * then we just miss that fact, and the folio stays dirty.
1856 	 *
1857 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
1858 	 * handle that here by just cleaning them.
1859 	 */
1860 
1861 	bh = head;
1862 	blocksize = bh->b_size;
1863 
1864 	block = div_u64(folio_pos(folio), blocksize);
1865 	last_block = div_u64(i_size_read(inode) - 1, blocksize);
1866 
1867 	/*
1868 	 * Get all the dirty buffers mapped to disk addresses and
1869 	 * handle any aliases from the underlying blockdev's mapping.
1870 	 */
1871 	do {
1872 		if (block > last_block) {
1873 			/*
1874 			 * mapped buffers outside i_size will occur, because
1875 			 * this folio can be outside i_size when there is a
1876 			 * truncate in progress.
1877 			 */
1878 			/*
1879 			 * The buffer was zeroed by block_write_full_folio()
1880 			 */
1881 			clear_buffer_dirty(bh);
1882 			set_buffer_uptodate(bh);
1883 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1884 			   buffer_dirty(bh)) {
1885 			WARN_ON(bh->b_size != blocksize);
1886 			err = get_block(inode, block, bh, 1);
1887 			if (err)
1888 				goto recover;
1889 			clear_buffer_delay(bh);
1890 			if (buffer_new(bh)) {
1891 				/* blockdev mappings never come here */
1892 				clear_buffer_new(bh);
1893 				clean_bdev_bh_alias(bh);
1894 			}
1895 		}
1896 		bh = bh->b_this_page;
1897 		block++;
1898 	} while (bh != head);
1899 
1900 	do {
1901 		if (!buffer_mapped(bh))
1902 			continue;
1903 		/*
1904 		 * If it's a fully non-blocking write attempt and we cannot
1905 		 * lock the buffer then redirty the folio.  Note that this can
1906 		 * potentially cause a busy-wait loop from writeback threads
1907 		 * and kswapd activity, but those code paths have their own
1908 		 * higher-level throttling.
1909 		 */
1910 		if (wbc->sync_mode != WB_SYNC_NONE) {
1911 			lock_buffer(bh);
1912 		} else if (!trylock_buffer(bh)) {
1913 			folio_redirty_for_writepage(wbc, folio);
1914 			continue;
1915 		}
1916 		if (test_clear_buffer_dirty(bh)) {
1917 			mark_buffer_async_write_endio(bh,
1918 				end_buffer_async_write);
1919 		} else {
1920 			unlock_buffer(bh);
1921 		}
1922 	} while ((bh = bh->b_this_page) != head);
1923 
1924 	/*
1925 	 * The folio and its buffers are protected by the writeback flag,
1926 	 * so we can drop the bh refcounts early.
1927 	 */
1928 	BUG_ON(folio_test_writeback(folio));
1929 	folio_start_writeback(folio);
1930 
1931 	do {
1932 		struct buffer_head *next = bh->b_this_page;
1933 		if (buffer_async_write(bh)) {
1934 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1935 				      inode->i_write_hint, wbc);
1936 			nr_underway++;
1937 		}
1938 		bh = next;
1939 	} while (bh != head);
1940 	folio_unlock(folio);
1941 
1942 	err = 0;
1943 done:
1944 	if (nr_underway == 0) {
1945 		/*
1946 		 * The folio was marked dirty, but the buffers were
1947 		 * clean.  Someone wrote them back by hand with
1948 		 * write_dirty_buffer/submit_bh.  A rare case.
1949 		 */
1950 		folio_end_writeback(folio);
1951 
1952 		/*
1953 		 * The folio and buffer_heads can be released at any time from
1954 		 * here on.
1955 		 */
1956 	}
1957 	return err;
1958 
1959 recover:
1960 	/*
1961 	 * ENOSPC, or some other error.  We may already have added some
1962 	 * blocks to the file, so we need to write these out to avoid
1963 	 * exposing stale data.
1964 	 * The folio is currently locked and not marked for writeback
1965 	 */
1966 	bh = head;
1967 	/* Recovery: lock and submit the mapped buffers */
1968 	do {
1969 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1970 		    !buffer_delay(bh)) {
1971 			lock_buffer(bh);
1972 			mark_buffer_async_write_endio(bh,
1973 				end_buffer_async_write);
1974 		} else {
1975 			/*
1976 			 * The buffer may have been set dirty during
1977 			 * attachment to a dirty folio.
1978 			 */
1979 			clear_buffer_dirty(bh);
1980 		}
1981 	} while ((bh = bh->b_this_page) != head);
1982 	BUG_ON(folio_test_writeback(folio));
1983 	mapping_set_error(folio->mapping, err);
1984 	folio_start_writeback(folio);
1985 	do {
1986 		struct buffer_head *next = bh->b_this_page;
1987 		if (buffer_async_write(bh)) {
1988 			clear_buffer_dirty(bh);
1989 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1990 				      inode->i_write_hint, wbc);
1991 			nr_underway++;
1992 		}
1993 		bh = next;
1994 	} while (bh != head);
1995 	folio_unlock(folio);
1996 	goto done;
1997 }
1998 EXPORT_SYMBOL(__block_write_full_folio);
1999 
2000 /*
2001  * If a folio has any new buffers, zero them out here, and mark them uptodate
2002  * and dirty so they'll be written out (in order to prevent uninitialised
2003  * block data from leaking). And clear the new bit.
2004  */
2005 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
2006 {
2007 	size_t block_start, block_end;
2008 	struct buffer_head *head, *bh;
2009 
2010 	BUG_ON(!folio_test_locked(folio));
2011 	head = folio_buffers(folio);
2012 	if (!head)
2013 		return;
2014 
2015 	bh = head;
2016 	block_start = 0;
2017 	do {
2018 		block_end = block_start + bh->b_size;
2019 
2020 		if (buffer_new(bh)) {
2021 			if (block_end > from && block_start < to) {
2022 				if (!folio_test_uptodate(folio)) {
2023 					size_t start, xend;
2024 
2025 					start = max(from, block_start);
2026 					xend = min(to, block_end);
2027 
2028 					folio_zero_segment(folio, start, xend);
2029 					set_buffer_uptodate(bh);
2030 				}
2031 
2032 				clear_buffer_new(bh);
2033 				mark_buffer_dirty(bh);
2034 			}
2035 		}
2036 
2037 		block_start = block_end;
2038 		bh = bh->b_this_page;
2039 	} while (bh != head);
2040 }
2041 EXPORT_SYMBOL(folio_zero_new_buffers);
2042 
2043 static int
2044 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2045 		const struct iomap *iomap)
2046 {
2047 	loff_t offset = (loff_t)block << inode->i_blkbits;
2048 
2049 	bh->b_bdev = iomap->bdev;
2050 
2051 	/*
2052 	 * The block number gives the offset in the file we need to map; the
2053 	 * iomap contains the offset at which the mapping starts. If the
2054 	 * mapping ends before the current block, do not map the buffer and
2055 	 * let the caller handle the error.
2056 	 */
2057 	if (offset >= iomap->offset + iomap->length)
2058 		return -EIO;
2059 
2060 	switch (iomap->type) {
2061 	case IOMAP_HOLE:
2062 		/*
2063 		 * If the buffer is not up to date or beyond the current EOF,
2064 		 * we need to mark it as new to ensure sub-block zeroing is
2065 		 * executed if necessary.
2066 		 */
2067 		if (!buffer_uptodate(bh) ||
2068 		    (offset >= i_size_read(inode)))
2069 			set_buffer_new(bh);
2070 		return 0;
2071 	case IOMAP_DELALLOC:
2072 		if (!buffer_uptodate(bh) ||
2073 		    (offset >= i_size_read(inode)))
2074 			set_buffer_new(bh);
2075 		set_buffer_uptodate(bh);
2076 		set_buffer_mapped(bh);
2077 		set_buffer_delay(bh);
2078 		return 0;
2079 	case IOMAP_UNWRITTEN:
2080 		/*
2081 		 * For unwritten regions, we always need to ensure that regions
2082 		 * in the block we are not writing to are zeroed. Mark the
2083 		 * buffer as new to ensure this.
2084 		 */
2085 		set_buffer_new(bh);
2086 		set_buffer_unwritten(bh);
2087 		fallthrough;
2088 	case IOMAP_MAPPED:
2089 		if ((iomap->flags & IOMAP_F_NEW) ||
2090 		    offset >= i_size_read(inode)) {
2091 			/*
2092 			 * This can happen if truncating the block device races
2093 			 * with the check in the caller, as i_size updates
2094 			 * aren't synchronized by i_rwsem for block devices.
2096 			 */
2097 			if (S_ISBLK(inode->i_mode))
2098 				return -EIO;
2099 			set_buffer_new(bh);
2100 		}
2101 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2102 				inode->i_blkbits;
2103 		set_buffer_mapped(bh);
2104 		return 0;
2105 	default:
2106 		WARN_ON_ONCE(1);
2107 		return -EIO;
2108 	}
2109 }
2110 
2111 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2112 		get_block_t *get_block, const struct iomap *iomap)
2113 {
2114 	size_t from = offset_in_folio(folio, pos);
2115 	size_t to = from + len;
2116 	struct inode *inode = folio->mapping->host;
2117 	size_t block_start, block_end;
2118 	sector_t block;
2119 	int err = 0;
2120 	size_t blocksize;
2121 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2122 
2123 	BUG_ON(!folio_test_locked(folio));
2124 	BUG_ON(to > folio_size(folio));
2125 	BUG_ON(from > to);
2126 
2127 	head = folio_create_buffers(folio, inode, 0);
2128 	blocksize = head->b_size;
2129 	block = div_u64(folio_pos(folio), blocksize);
2130 
2131 	for (bh = head, block_start = 0; bh != head || !block_start;
2132 	    block++, block_start=block_end, bh = bh->b_this_page) {
2133 		block_end = block_start + blocksize;
2134 		if (block_end <= from || block_start >= to) {
2135 			if (folio_test_uptodate(folio)) {
2136 				if (!buffer_uptodate(bh))
2137 					set_buffer_uptodate(bh);
2138 			}
2139 			continue;
2140 		}
2141 		if (buffer_new(bh))
2142 			clear_buffer_new(bh);
2143 		if (!buffer_mapped(bh)) {
2144 			WARN_ON(bh->b_size != blocksize);
2145 			if (get_block)
2146 				err = get_block(inode, block, bh, 1);
2147 			else
2148 				err = iomap_to_bh(inode, block, bh, iomap);
2149 			if (err)
2150 				break;
2151 
2152 			if (buffer_new(bh)) {
2153 				clean_bdev_bh_alias(bh);
2154 				if (folio_test_uptodate(folio)) {
2155 					clear_buffer_new(bh);
2156 					set_buffer_uptodate(bh);
2157 					mark_buffer_dirty(bh);
2158 					continue;
2159 				}
2160 				if (block_end > to || block_start < from)
2161 					folio_zero_segments(folio,
2162 						to, block_end,
2163 						block_start, from);
2164 				continue;
2165 			}
2166 		}
2167 		if (folio_test_uptodate(folio)) {
2168 			if (!buffer_uptodate(bh))
2169 				set_buffer_uptodate(bh);
2170 			continue;
2171 		}
2172 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2173 		    !buffer_unwritten(bh) &&
2174 		     (block_start < from || block_end > to)) {
2175 			bh_read_nowait(bh, 0);
2176 			*wait_bh++ = bh;
2177 		}
2178 	}
2179 	/*
2180 	 * If we issued read requests - let them complete.
2181 	 */
2182 	while (wait_bh > wait) {
2183 		wait_on_buffer(*--wait_bh);
2184 		if (!buffer_uptodate(*wait_bh))
2185 			err = -EIO;
2186 	}
2187 	if (unlikely(err))
2188 		folio_zero_new_buffers(folio, from, to);
2189 	return err;
2190 }
2191 
2192 int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2193 		get_block_t *get_block)
2194 {
2195 	return __block_write_begin_int(folio, pos, len, get_block, NULL);
2196 }
2197 EXPORT_SYMBOL(__block_write_begin);
2198 
2199 void block_commit_write(struct folio *folio, size_t from, size_t to)
2200 {
2201 	size_t block_start, block_end;
2202 	bool partial = false;
2203 	unsigned blocksize;
2204 	struct buffer_head *bh, *head;
2205 
2206 	bh = head = folio_buffers(folio);
2207 	if (!bh)
2208 		return;
2209 	blocksize = bh->b_size;
2210 
2211 	block_start = 0;
2212 	do {
2213 		block_end = block_start + blocksize;
2214 		if (block_end <= from || block_start >= to) {
2215 			if (!buffer_uptodate(bh))
2216 				partial = true;
2217 		} else {
2218 			set_buffer_uptodate(bh);
2219 			mark_buffer_dirty(bh);
2220 		}
2221 		if (buffer_new(bh))
2222 			clear_buffer_new(bh);
2223 
2224 		block_start = block_end;
2225 		bh = bh->b_this_page;
2226 	} while (bh != head);
2227 
2228 	/*
2229 	 * If this is a partial write which happened to make all buffers
2230 	 * uptodate then we can optimize away a bogus read_folio() for
2231 	 * the next read(). Here we 'discover' whether the folio went
2232 	 * uptodate as a result of this (potentially partial) write.
2233 	 */
2234 	if (!partial)
2235 		folio_mark_uptodate(folio);
2236 }
2237 EXPORT_SYMBOL(block_commit_write);
2238 
2239 /*
2240  * block_write_begin takes care of the basic task of block allocation and
2241  * bringing partial write blocks uptodate first.
2242  *
2243  * The filesystem needs to handle block truncation upon failure.
2244  */
2245 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2246 		struct folio **foliop, get_block_t *get_block)
2247 {
2248 	pgoff_t index = pos >> PAGE_SHIFT;
2249 	struct folio *folio;
2250 	int status;
2251 
2252 	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2253 			mapping_gfp_mask(mapping));
2254 	if (IS_ERR(folio))
2255 		return PTR_ERR(folio);
2256 
2257 	status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2258 	if (unlikely(status)) {
2259 		folio_unlock(folio);
2260 		folio_put(folio);
2261 		folio = NULL;
2262 	}
2263 
2264 	*foliop = folio;
2265 	return status;
2266 }
2267 EXPORT_SYMBOL(block_write_begin);
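
/*
 * A minimal sketch (assuming a hypothetical filesystem with its own
 * myfs_get_block() helper) of a ->write_begin implementation built on
 * block_write_begin().  On failure the filesystem is still responsible
 * for truncating any blocks it may have allocated beyond EOF, as noted
 * above; myfs_write_failed() stands in for that hypothetical cleanup:
 *
 *	static int myfs_write_begin(const struct kiocb *iocb,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		int ret = block_write_begin(mapping, pos, len, foliop,
 *					    myfs_get_block);
 *		if (ret)
 *			myfs_write_failed(mapping, pos + len);
 *		return ret;
 *	}
 */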
2268 
2269 int block_write_end(loff_t pos, unsigned len, unsigned copied,
2270 		struct folio *folio)
2271 {
2272 	size_t start = pos - folio_pos(folio);
2273 
2274 	if (unlikely(copied < len)) {
2275 		/*
2276 		 * The buffers that were written will now be uptodate, so
2277 		 * we don't have to worry about a read_folio reading them
2278 		 * and overwriting a partial write. However if we have
2279 		 * encountered a short write and only partially written
2280 		 * into a buffer, it will not be marked uptodate, so a
2281 		 * read_folio might come in and destroy our partial write.
2282 		 *
2283 		 * Do the simplest thing, and just treat any short write to a
2284 		 * non uptodate folio as a zero-length write, and force the
2285 		 * caller to redo the whole thing.
2286 		 */
2287 		if (!folio_test_uptodate(folio))
2288 			copied = 0;
2289 
2290 		folio_zero_new_buffers(folio, start+copied, start+len);
2291 	}
2292 	flush_dcache_folio(folio);
2293 
2294 	/* This could be a short (even 0-length) commit */
2295 	block_commit_write(folio, start, start + copied);
2296 
2297 	return copied;
2298 }
2299 EXPORT_SYMBOL(block_write_end);
2300 
2301 int generic_write_end(const struct kiocb *iocb, struct address_space *mapping,
2302 		      loff_t pos, unsigned len, unsigned copied,
2303 		      struct folio *folio, void *fsdata)
2304 {
2305 	struct inode *inode = mapping->host;
2306 	loff_t old_size = inode->i_size;
2307 	bool i_size_changed = false;
2308 
2309 	copied = block_write_end(pos, len, copied, folio);
2310 
2311 	/*
2312 	 * No need to use i_size_read() here, the i_size cannot change under us
2313 	 * because we hold i_rwsem.
2314 	 *
2315 	 * But it's important to update i_size while still holding folio lock:
2316 	 * page writeout could otherwise come in and zero beyond i_size.
2317 	 */
2318 	if (pos + copied > inode->i_size) {
2319 		i_size_write(inode, pos + copied);
2320 		i_size_changed = true;
2321 	}
2322 
2323 	folio_unlock(folio);
2324 	folio_put(folio);
2325 
2326 	if (old_size < pos)
2327 		pagecache_isize_extended(inode, old_size, pos);
2328 	/*
2329 	 * Don't mark the inode dirty under the page lock. First, it would
2330 	 * unnecessarily lengthen the time the page lock is held. Second, it
2331 	 * would force a lock ordering of page lock before transaction start
2332 	 * for journaling filesystems.
2333 	 */
2334 	if (i_size_changed)
2335 		mark_inode_dirty(inode);
2336 	return copied;
2337 }
2338 EXPORT_SYMBOL(generic_write_end);
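
/*
 * Putting the pieces together, a simple get_block-based filesystem can
 * wire the generic helpers straight into its address_space_operations.
 * This is only an illustrative sketch; myfs_read_folio, myfs_write_begin
 * and myfs_bmap are the hypothetical wrappers shown in other comments in
 * this file:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *		.bmap			= myfs_bmap,
 *	};
 */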
2339 
2340 /*
2341  * block_is_partially_uptodate checks whether buffers within a folio are
2342  * uptodate or not.
2343  *
2344  * Returns true if all buffers which correspond to the specified part
2345  * of the folio are uptodate.
2346  */
2347 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2348 {
2349 	unsigned block_start, block_end, blocksize;
2350 	unsigned to;
2351 	struct buffer_head *bh, *head;
2352 	bool ret = true;
2353 
2354 	head = folio_buffers(folio);
2355 	if (!head)
2356 		return false;
2357 	blocksize = head->b_size;
2358 	to = min(folio_size(folio) - from, count);
2359 	to = from + to;
2360 	if (from < blocksize && to > folio_size(folio) - blocksize)
2361 		return false;
2362 
2363 	bh = head;
2364 	block_start = 0;
2365 	do {
2366 		block_end = block_start + blocksize;
2367 		if (block_end > from && block_start < to) {
2368 			if (!buffer_uptodate(bh)) {
2369 				ret = false;
2370 				break;
2371 			}
2372 			if (block_end >= to)
2373 				break;
2374 		}
2375 		block_start = block_end;
2376 		bh = bh->b_this_page;
2377 	} while (bh != head);
2378 
2379 	return ret;
2380 }
2381 EXPORT_SYMBOL(block_is_partially_uptodate);
2382 
2383 /*
2384  * Generic "read_folio" function for block devices that have the normal
2385  * get_block functionality. This is most of the block device filesystems.
2386  * Reads the folio asynchronously --- the unlock_buffer() and
2387  * set/clear_buffer_uptodate() functions propagate buffer state into the
2388  * folio once IO has completed.
2389  */
2390 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2391 {
2392 	struct inode *inode = folio->mapping->host;
2393 	sector_t iblock, lblock;
2394 	struct buffer_head *bh, *head, *prev = NULL;
2395 	size_t blocksize;
2396 	int fully_mapped = 1;
2397 	bool page_error = false;
2398 	loff_t limit = i_size_read(inode);
2399 
2400 	/* This is needed for ext4. */
2401 	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2402 		limit = inode->i_sb->s_maxbytes;
2403 
2404 	head = folio_create_buffers(folio, inode, 0);
2405 	blocksize = head->b_size;
2406 
2407 	iblock = div_u64(folio_pos(folio), blocksize);
2408 	lblock = div_u64(limit + blocksize - 1, blocksize);
2409 	bh = head;
2410 
2411 	do {
2412 		if (buffer_uptodate(bh))
2413 			continue;
2414 
2415 		if (!buffer_mapped(bh)) {
2416 			int err = 0;
2417 
2418 			fully_mapped = 0;
2419 			if (iblock < lblock) {
2420 				WARN_ON(bh->b_size != blocksize);
2421 				err = get_block(inode, iblock, bh, 0);
2422 				if (err)
2423 					page_error = true;
2424 			}
2425 			if (!buffer_mapped(bh)) {
2426 				folio_zero_range(folio, bh_offset(bh),
2427 						blocksize);
2428 				if (!err)
2429 					set_buffer_uptodate(bh);
2430 				continue;
2431 			}
2432 			/*
2433 			 * get_block() might have updated the buffer
2434 			 * synchronously
2435 			 */
2436 			if (buffer_uptodate(bh))
2437 				continue;
2438 		}
2439 
2440 		lock_buffer(bh);
2441 		if (buffer_uptodate(bh)) {
2442 			unlock_buffer(bh);
2443 			continue;
2444 		}
2445 
2446 		mark_buffer_async_read(bh);
2447 		if (prev)
2448 			submit_bh(REQ_OP_READ, prev);
2449 		prev = bh;
2450 	} while (iblock++, (bh = bh->b_this_page) != head);
2451 
2452 	if (fully_mapped)
2453 		folio_set_mappedtodisk(folio);
2454 
2455 	/*
2456 	 * All buffers are uptodate or get_block() returned an error
2457 	 * when trying to map them - we must finish the read because
2458 	 * end_buffer_async_read() will never be called on any buffer
2459 	 * in this folio.
2460 	 */
2461 	if (prev)
2462 		submit_bh(REQ_OP_READ, prev);
2463 	else
2464 		folio_end_read(folio, !page_error);
2465 
2466 	return 0;
2467 }
2468 EXPORT_SYMBOL(block_read_full_folio);
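
/*
 * For most get_block-based filesystems the ->read_folio method is just a
 * thin wrapper around this helper.  A sketch, again assuming a
 * hypothetical myfs_get_block():
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */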
2469 
2470 /* utility function for filesystems that need to do work on expanding
2471  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2472  * deal with the hole.
2473  */
2474 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2475 {
2476 	struct address_space *mapping = inode->i_mapping;
2477 	const struct address_space_operations *aops = mapping->a_ops;
2478 	struct folio *folio;
2479 	void *fsdata = NULL;
2480 	int err;
2481 
2482 	err = inode_newsize_ok(inode, size);
2483 	if (err)
2484 		goto out;
2485 
2486 	err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
2487 	if (err)
2488 		goto out;
2489 
2490 	err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
2491 	BUG_ON(err > 0);
2492 
2493 out:
2494 	return err;
2495 }
2496 EXPORT_SYMBOL(generic_cont_expand_simple);
2497 
2498 static int cont_expand_zero(const struct kiocb *iocb,
2499 			    struct address_space *mapping,
2500 			    loff_t pos, loff_t *bytes)
2501 {
2502 	struct inode *inode = mapping->host;
2503 	const struct address_space_operations *aops = mapping->a_ops;
2504 	unsigned int blocksize = i_blocksize(inode);
2505 	struct folio *folio;
2506 	void *fsdata = NULL;
2507 	pgoff_t index, curidx;
2508 	loff_t curpos;
2509 	unsigned zerofrom, offset, len;
2510 	int err = 0;
2511 
2512 	index = pos >> PAGE_SHIFT;
2513 	offset = pos & ~PAGE_MASK;
2514 
2515 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2516 		zerofrom = curpos & ~PAGE_MASK;
2517 		if (zerofrom & (blocksize-1)) {
2518 			*bytes |= (blocksize-1);
2519 			(*bytes)++;
2520 		}
2521 		len = PAGE_SIZE - zerofrom;
2522 
2523 		err = aops->write_begin(iocb, mapping, curpos, len,
2524 					    &folio, &fsdata);
2525 		if (err)
2526 			goto out;
2527 		folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2528 		err = aops->write_end(iocb, mapping, curpos, len, len,
2529 						folio, fsdata);
2530 		if (err < 0)
2531 			goto out;
2532 		BUG_ON(err != len);
2533 		err = 0;
2534 
2535 		balance_dirty_pages_ratelimited(mapping);
2536 
2537 		if (fatal_signal_pending(current)) {
2538 			err = -EINTR;
2539 			goto out;
2540 		}
2541 	}
2542 
2543 	/* page covers the boundary, find the boundary offset */
2544 	if (index == curidx) {
2545 		zerofrom = curpos & ~PAGE_MASK;
2546 		/* if we are going to expand the file, the last block will be filled */
2547 		if (offset <= zerofrom) {
2548 			goto out;
2549 		}
2550 		if (zerofrom & (blocksize-1)) {
2551 			*bytes |= (blocksize-1);
2552 			(*bytes)++;
2553 		}
2554 		len = offset - zerofrom;
2555 
2556 		err = aops->write_begin(iocb, mapping, curpos, len,
2557 					    &folio, &fsdata);
2558 		if (err)
2559 			goto out;
2560 		folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2561 		err = aops->write_end(iocb, mapping, curpos, len, len,
2562 						folio, fsdata);
2563 		if (err < 0)
2564 			goto out;
2565 		BUG_ON(err != len);
2566 		err = 0;
2567 	}
2568 out:
2569 	return err;
2570 }
2571 
2572 /*
2573  * For moronic filesystems that do not allow holes in files.
2574  * We may have to extend the file.
2575  */
2576 int cont_write_begin(const struct kiocb *iocb, struct address_space *mapping,
2577 		     loff_t pos, unsigned len, struct folio **foliop,
2578 		     void **fsdata, get_block_t *get_block, loff_t *bytes)
2579 {
2580 	struct inode *inode = mapping->host;
2581 	unsigned int blocksize = i_blocksize(inode);
2582 	unsigned int zerofrom;
2583 	int err;
2584 
2585 	err = cont_expand_zero(iocb, mapping, pos, bytes);
2586 	if (err)
2587 		return err;
2588 
2589 	zerofrom = *bytes & ~PAGE_MASK;
2590 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2591 		*bytes |= (blocksize-1);
2592 		(*bytes)++;
2593 	}
2594 
2595 	return block_write_begin(mapping, pos, len, foliop, get_block);
2596 }
2597 EXPORT_SYMBOL(cont_write_begin);
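
/*
 * Illustrative sketch for such a filesystem: the per-inode "bytes" cursor
 * (here the hypothetical field myfs_i(inode)->i_tail) records how much of
 * the file is known to contain valid (written or zeroed) data, and
 * cont_expand_zero() advances it before falling through to
 * block_write_begin():
 *
 *	static int myfs_cont_write_begin(const struct kiocb *iocb,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return cont_write_begin(iocb, mapping, pos, len, foliop,
 *					fsdata, myfs_get_block,
 *					&myfs_i(mapping->host)->i_tail);
 *	}
 */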
2598 
2599 /*
2600  * block_page_mkwrite() is not allowed to change the file size as it gets
2601  * called from a page fault handler when a page is first dirtied. Hence we must
2602  * be careful to check for EOF conditions here. We set the page up correctly
2603  * for a written page which means we get ENOSPC checking when writing into
2604  * holes and correct delalloc and unwritten extent mapping on filesystems that
2605  * support these features.
2606  *
2607  * We are not allowed to take the i_rwsem here so we have to play games to
2608  * protect against truncate races as the page could now be beyond EOF.  Because
2609  * truncate writes the inode size before removing pages, once we have the
2610  * page lock we can determine safely if the page is beyond EOF. If it is not
2611  * beyond EOF, then the page is guaranteed safe against truncation until we
2612  * unlock the page.
2613  *
2614  * Direct callers of this function should protect against filesystem freezing
2615  * using sb_start_pagefault() - sb_end_pagefault() functions.
2616  */
2617 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2618 			 get_block_t get_block)
2619 {
2620 	struct folio *folio = page_folio(vmf->page);
2621 	struct inode *inode = file_inode(vma->vm_file);
2622 	unsigned long end;
2623 	loff_t size;
2624 	int ret;
2625 
2626 	folio_lock(folio);
2627 	size = i_size_read(inode);
2628 	if ((folio->mapping != inode->i_mapping) ||
2629 	    (folio_pos(folio) >= size)) {
2630 		/* We overload EFAULT to mean page got truncated */
2631 		ret = -EFAULT;
2632 		goto out_unlock;
2633 	}
2634 
2635 	end = folio_size(folio);
2636 	/* folio is wholly or partially inside EOF */
2637 	if (folio_pos(folio) + end > size)
2638 		end = size - folio_pos(folio);
2639 
2640 	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2641 	if (unlikely(ret))
2642 		goto out_unlock;
2643 
2644 	block_commit_write(folio, 0, end);
2645 
2646 	folio_mark_dirty(folio);
2647 	folio_wait_stable(folio);
2648 	return 0;
2649 out_unlock:
2650 	folio_unlock(folio);
2651 	return ret;
2652 }
2653 EXPORT_SYMBOL(block_page_mkwrite);
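
/*
 * A sketch of the usual wrapper, following the freeze-protection rule
 * stated above.  myfs_get_block is hypothetical; the error-to-vm_fault_t
 * mapping is the conventional one (0 means the folio is left locked):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
 *		int err;
 *
 *		sb_start_pagefault(sb);
 *		file_update_time(vmf->vma->vm_file);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(sb);
 *		if (!err)
 *			return VM_FAULT_LOCKED;
 *		return (err == -EFAULT) ? VM_FAULT_NOPAGE : VM_FAULT_SIGBUS;
 *	}
 */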
2654 
2655 int block_truncate_page(struct address_space *mapping,
2656 			loff_t from, get_block_t *get_block)
2657 {
2658 	pgoff_t index = from >> PAGE_SHIFT;
2659 	unsigned blocksize;
2660 	sector_t iblock;
2661 	size_t offset, length, pos;
2662 	struct inode *inode = mapping->host;
2663 	struct folio *folio;
2664 	struct buffer_head *bh;
2665 	int err = 0;
2666 
2667 	blocksize = i_blocksize(inode);
2668 	length = from & (blocksize - 1);
2669 
2670 	/* Block boundary? Nothing to do */
2671 	if (!length)
2672 		return 0;
2673 
2674 	length = blocksize - length;
2675 	iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2676 
2677 	folio = filemap_grab_folio(mapping, index);
2678 	if (IS_ERR(folio))
2679 		return PTR_ERR(folio);
2680 
2681 	bh = folio_buffers(folio);
2682 	if (!bh)
2683 		bh = create_empty_buffers(folio, blocksize, 0);
2684 
2685 	/* Find the buffer that contains "offset" */
2686 	offset = offset_in_folio(folio, from);
2687 	pos = blocksize;
2688 	while (offset >= pos) {
2689 		bh = bh->b_this_page;
2690 		iblock++;
2691 		pos += blocksize;
2692 	}
2693 
2694 	if (!buffer_mapped(bh)) {
2695 		WARN_ON(bh->b_size != blocksize);
2696 		err = get_block(inode, iblock, bh, 0);
2697 		if (err)
2698 			goto unlock;
2699 		/* unmapped? It's a hole - nothing to do */
2700 		if (!buffer_mapped(bh))
2701 			goto unlock;
2702 	}
2703 
2704 	/* Ok, it's mapped. Make sure it's up-to-date */
2705 	if (folio_test_uptodate(folio))
2706 		set_buffer_uptodate(bh);
2707 
2708 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2709 		err = bh_read(bh, 0);
2710 		/* Uhhuh. Read error. Complain and punt. */
2711 		if (err < 0)
2712 			goto unlock;
2713 	}
2714 
2715 	folio_zero_range(folio, offset, length);
2716 	mark_buffer_dirty(bh);
2717 
2718 unlock:
2719 	folio_unlock(folio);
2720 	folio_put(folio);
2721 
2722 	return err;
2723 }
2724 EXPORT_SYMBOL(block_truncate_page);
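
/*
 * Typically called from a filesystem's truncate/setattr path to zero the
 * tail of a now-partial last block, e.g. (myfs_get_block again being the
 * filesystem's hypothetical block mapper):
 *
 *	err = block_truncate_page(inode->i_mapping, newsize, myfs_get_block);
 */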
2725 
2726 /*
2727  * The generic write folio function for buffer-backed address_spaces
2728  */
2729 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2730 		void *get_block)
2731 {
2732 	struct inode * const inode = folio->mapping->host;
2733 	loff_t i_size = i_size_read(inode);
2734 
2735 	/* Is the folio fully inside i_size? */
2736 	if (folio_next_pos(folio) <= i_size)
2737 		return __block_write_full_folio(inode, folio, get_block, wbc);
2738 
2739 	/* Is the folio fully outside i_size? (truncate in progress) */
2740 	if (folio_pos(folio) >= i_size) {
2741 		folio_unlock(folio);
2742 		return 0; /* don't care */
2743 	}
2744 
2745 	/*
2746 	 * The folio straddles i_size.  It must be zeroed out on each and every
2747 	 * writeback invocation because it may be mmapped.  "A file is mapped
2748 	 * in multiples of the page size.  For a file that is not a multiple of
2749 	 * the page size, the remaining memory is zeroed when mapped, and
2750 	 * writes to that region are not written out to the file."
2751 	 */
2752 	folio_zero_segment(folio, offset_in_folio(folio, i_size),
2753 			folio_size(folio));
2754 	return __block_write_full_folio(inode, folio, get_block, wbc);
2755 }
2756 
2757 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2758 			    get_block_t *get_block)
2759 {
2760 	struct inode *inode = mapping->host;
2761 	struct buffer_head tmp = {
2762 		.b_size = i_blocksize(inode),
2763 	};
2764 
2765 	get_block(inode, block, &tmp, 0);
2766 	return tmp.b_blocknr;
2767 }
2768 EXPORT_SYMBOL(generic_block_bmap);
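
/*
 * The matching ->bmap method is usually just a one-line wrapper
 * (a sketch, with myfs_get_block hypothetical):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */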
2769 
2770 static void end_bio_bh_io_sync(struct bio *bio)
2771 {
2772 	struct buffer_head *bh = bio->bi_private;
2773 
2774 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
2775 		set_bit(BH_Quiet, &bh->b_state);
2776 
2777 	bh->b_end_io(bh, !bio->bi_status);
2778 	bio_put(bio);
2779 }
2780 
2781 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2782 			  enum rw_hint write_hint,
2783 			  struct writeback_control *wbc)
2784 {
2785 	const enum req_op op = opf & REQ_OP_MASK;
2786 	struct bio *bio;
2787 
2788 	BUG_ON(!buffer_locked(bh));
2789 	BUG_ON(!buffer_mapped(bh));
2790 	BUG_ON(!bh->b_end_io);
2791 	BUG_ON(buffer_delay(bh));
2792 	BUG_ON(buffer_unwritten(bh));
2793 
2794 	/*
2795 	 * Only clear out a write error when rewriting
2796 	 */
2797 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2798 		clear_buffer_write_io_error(bh);
2799 
2800 	if (buffer_meta(bh))
2801 		opf |= REQ_META;
2802 	if (buffer_prio(bh))
2803 		opf |= REQ_PRIO;
2804 
2805 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2806 
2807 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2808 
2809 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2810 	bio->bi_write_hint = write_hint;
2811 
2812 	bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
2813 
2814 	bio->bi_end_io = end_bio_bh_io_sync;
2815 	bio->bi_private = bh;
2816 
2817 	/* Take care of bh's that straddle the end of the device */
2818 	guard_bio_eod(bio);
2819 
2820 	if (wbc) {
2821 		wbc_init_bio(wbc, bio);
2822 		wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
2823 	}
2824 
2825 	blk_crypto_submit_bio(bio);
2826 }
2827 
2828 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2829 {
2830 	submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2831 }
2832 EXPORT_SYMBOL(submit_bh);
2833 
2834 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2835 {
2836 	lock_buffer(bh);
2837 	if (!test_clear_buffer_dirty(bh)) {
2838 		unlock_buffer(bh);
2839 		return;
2840 	}
2841 	bh->b_end_io = end_buffer_write_sync;
2842 	get_bh(bh);
2843 	submit_bh(REQ_OP_WRITE | op_flags, bh);
2844 }
2845 EXPORT_SYMBOL(write_dirty_buffer);
2846 
2847 /*
2848  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2849  * and then start new I/O and then wait upon it.  The caller must have a ref on
2850  * the buffer_head.
2851  */
2852 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2853 {
2854 	WARN_ON(atomic_read(&bh->b_count) < 1);
2855 	lock_buffer(bh);
2856 	if (test_clear_buffer_dirty(bh)) {
2857 		/*
2858 		 * The bh should be mapped, but it might not be if the
2859 		 * device was hot-removed. Not much we can do but fail the I/O.
2860 		 */
2861 		if (!buffer_mapped(bh)) {
2862 			unlock_buffer(bh);
2863 			return -EIO;
2864 		}
2865 
2866 		get_bh(bh);
2867 		bh->b_end_io = end_buffer_write_sync;
2868 		submit_bh(REQ_OP_WRITE | op_flags, bh);
2869 		wait_on_buffer(bh);
2870 		if (!buffer_uptodate(bh))
2871 			return -EIO;
2872 	} else {
2873 		unlock_buffer(bh);
2874 	}
2875 	return 0;
2876 }
2877 EXPORT_SYMBOL(__sync_dirty_buffer);
2878 
2879 int sync_dirty_buffer(struct buffer_head *bh)
2880 {
2881 	return __sync_dirty_buffer(bh, REQ_SYNC);
2882 }
2883 EXPORT_SYMBOL(sync_dirty_buffer);
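
/*
 * A common use is synchronously updating a metadata block, sketched here
 * with hypothetical names (sb_bread() and brelse() come from
 * buffer_head.h):
 *
 *	struct buffer_head *bh = sb_bread(sb, MYFS_SUPER_BLOCK_NR);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data, new_super, sb->s_blocksize);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 *
 * sync_dirty_buffer() waits for the write, so err reflects the final I/O
 * status.
 */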
2884 
2885 static inline int buffer_busy(struct buffer_head *bh)
2886 {
2887 	return atomic_read(&bh->b_count) |
2888 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2889 }
2890 
2891 static bool
2892 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2893 {
2894 	struct buffer_head *head = folio_buffers(folio);
2895 	struct buffer_head *bh;
2896 
2897 	bh = head;
2898 	do {
2899 		if (buffer_busy(bh))
2900 			goto failed;
2901 		bh = bh->b_this_page;
2902 	} while (bh != head);
2903 
2904 	do {
2905 		struct buffer_head *next = bh->b_this_page;
2906 
2907 		if (bh->b_assoc_map)
2908 			__remove_assoc_queue(bh);
2909 		bh = next;
2910 	} while (bh != head);
2911 	*buffers_to_free = head;
2912 	folio_detach_private(folio);
2913 	return true;
2914 failed:
2915 	return false;
2916 }
2917 
2918 /**
2919  * try_to_free_buffers - Release buffers attached to this folio.
2920  * @folio: The folio.
2921  *
2922  * If any buffers are in use (dirty, under writeback, elevated refcount),
2923  * no buffers will be freed.
2924  *
2925  * If the folio is dirty but all the buffers are clean then we need to
2926  * be sure to mark the folio clean as well.  This is because the folio
2927  * may be against a block device, and a later reattachment of buffers
2928  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2929  * filesystem data on the same device.
2930  *
2931  * The same applies to regular filesystem folios: if all the buffers are
2932  * clean then we set the folio clean and proceed.  To do that, we require
2933  * total exclusion from block_dirty_folio().  That is obtained with
2934  * i_private_lock.
2935  *
2936  * Exclusion against try_to_free_buffers may be obtained by either
2937  * locking the folio or by holding its mapping's i_private_lock.
2938  *
2939  * Context: Process context.  @folio must be locked.  Will not sleep.
2940  * Return: true if all buffers attached to this folio were freed.
2941  */
2942 bool try_to_free_buffers(struct folio *folio)
2943 {
2944 	struct address_space * const mapping = folio->mapping;
2945 	struct buffer_head *buffers_to_free = NULL;
2946 	bool ret = false;
2947 
2948 	BUG_ON(!folio_test_locked(folio));
2949 	if (folio_test_writeback(folio))
2950 		return false;
2951 
2952 	/* Misconfigured folio check */
2953 	if (WARN_ON_ONCE(!folio_buffers(folio)))
2954 		return true;
2955 
2956 	if (mapping == NULL) {		/* can this still happen? */
2957 		ret = drop_buffers(folio, &buffers_to_free);
2958 		goto out;
2959 	}
2960 
2961 	spin_lock(&mapping->i_private_lock);
2962 	ret = drop_buffers(folio, &buffers_to_free);
2963 
2964 	/*
2965 	 * If the filesystem writes its buffers by hand (eg ext3)
2966 	 * then we can have clean buffers against a dirty folio.  We
2967 	 * clean the folio here; otherwise the VM will never notice
2968 	 * that the filesystem did any IO at all.
2969 	 *
2970 	 * Also, during truncate, discard_buffer will have marked all
2971 	 * the folio's buffers clean.  We discover that here and clean
2972 	 * the folio also.
2973 	 *
2974 	 * i_private_lock must be held over this entire operation in order
2975 	 * to synchronise against block_dirty_folio and prevent the
2976 	 * dirty bit from being lost.
2977 	 */
2978 	if (ret)
2979 		folio_cancel_dirty(folio);
2980 	spin_unlock(&mapping->i_private_lock);
2981 out:
2982 	if (buffers_to_free) {
2983 		struct buffer_head *bh = buffers_to_free;
2984 
2985 		do {
2986 			struct buffer_head *next = bh->b_this_page;
2987 			free_buffer_head(bh);
2988 			bh = next;
2989 		} while (bh != buffers_to_free);
2990 	}
2991 	return ret;
2992 }
2993 EXPORT_SYMBOL(try_to_free_buffers);
2994 
2995 /*
2996  * Buffer-head allocation
2997  */
2998 static struct kmem_cache *bh_cachep __ro_after_init;
2999 
3000 /*
3001  * Once the number of bh's in the machine exceeds this level, we start
3002  * stripping them in writeback.
3003  */
3004 static unsigned long max_buffer_heads __ro_after_init;
3005 
3006 int buffer_heads_over_limit;
3007 
3008 struct bh_accounting {
3009 	int nr;			/* Number of live bh's */
3010 	int ratelimit;		/* Limit cacheline bouncing */
3011 };
3012 
3013 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3014 
3015 static void recalc_bh_state(void)
3016 {
3017 	int i;
3018 	int tot = 0;
3019 
3020 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3021 		return;
3022 	__this_cpu_write(bh_accounting.ratelimit, 0);
3023 	for_each_online_cpu(i)
3024 		tot += per_cpu(bh_accounting, i).nr;
3025 	buffer_heads_over_limit = (tot > max_buffer_heads);
3026 }
3027 
3028 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3029 {
3030 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3031 	if (ret) {
3032 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3033 		spin_lock_init(&ret->b_uptodate_lock);
3034 		preempt_disable();
3035 		__this_cpu_inc(bh_accounting.nr);
3036 		recalc_bh_state();
3037 		preempt_enable();
3038 	}
3039 	return ret;
3040 }
3041 EXPORT_SYMBOL(alloc_buffer_head);
3042 
3043 void free_buffer_head(struct buffer_head *bh)
3044 {
3045 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3046 	kmem_cache_free(bh_cachep, bh);
3047 	preempt_disable();
3048 	__this_cpu_dec(bh_accounting.nr);
3049 	recalc_bh_state();
3050 	preempt_enable();
3051 }
3052 EXPORT_SYMBOL(free_buffer_head);
3053 
3054 static int buffer_exit_cpu_dead(unsigned int cpu)
3055 {
3056 	int i;
3057 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3058 
3059 	for (i = 0; i < BH_LRU_SIZE; i++) {
3060 		brelse(b->bhs[i]);
3061 		b->bhs[i] = NULL;
3062 	}
3063 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3064 	per_cpu(bh_accounting, cpu).nr = 0;
3065 	return 0;
3066 }
3067 
3068 /**
3069  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3070  * @bh: struct buffer_head
3071  *
3072  * Return 1 if the buffer is up-to-date, or 0 with the buffer
3073  * locked if it is not.
3074  */
3075 int bh_uptodate_or_lock(struct buffer_head *bh)
3076 {
3077 	if (!buffer_uptodate(bh)) {
3078 		lock_buffer(bh);
3079 		if (!buffer_uptodate(bh))
3080 			return 0;
3081 		unlock_buffer(bh);
3082 	}
3083 	return 1;
3084 }
3085 EXPORT_SYMBOL(bh_uptodate_or_lock);
3086 
3087 /**
3088  * __bh_read - Submit read for a locked buffer
3089  * @bh: struct buffer_head
3090  * @op_flags: extra request flags to be ORed with REQ_OP_READ
3091  * @wait: wait until the read finishes
3092  *
3093  * Returns zero on success or when not waiting, and -EIO on error.
3094  */
3095 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3096 {
3097 	int ret = 0;
3098 
3099 	BUG_ON(!buffer_locked(bh));
3100 
3101 	get_bh(bh);
3102 	bh->b_end_io = end_buffer_read_sync;
3103 	submit_bh(REQ_OP_READ | op_flags, bh);
3104 	if (wait) {
3105 		wait_on_buffer(bh);
3106 		if (!buffer_uptodate(bh))
3107 			ret = -EIO;
3108 	}
3109 	return ret;
3110 }
3111 EXPORT_SYMBOL(__bh_read);
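
/*
 * Paired with bh_uptodate_or_lock() above, this gives the usual "read a
 * metadata buffer if needed" pattern (a sketch, not a complete caller):
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = __bh_read(bh, 0, true);
 *		if (err)
 *			return err;
 *	}
 */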
3112 
3113 /**
3114  * __bh_read_batch - Submit read for a batch of unlocked buffers
3115  * @nr: number of buffers in the batch
3116  * @bhs: a batch of struct buffer_head
3117  * @op_flags: extra request flags to be ORed with REQ_OP_READ
3118  * @force_lock: if set, wait for the buffer lock; otherwise skip any
3119  *              buffer that cannot be locked.
3120  *
3121  * Reads are submitted asynchronously; already-uptodate buffers are skipped.
3122  */
3123 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3124 		     blk_opf_t op_flags, bool force_lock)
3125 {
3126 	int i;
3127 
3128 	for (i = 0; i < nr; i++) {
3129 		struct buffer_head *bh = bhs[i];
3130 
3131 		if (buffer_uptodate(bh))
3132 			continue;
3133 
3134 		if (force_lock)
3135 			lock_buffer(bh);
3136 		else
3137 			if (!trylock_buffer(bh))
3138 				continue;
3139 
3140 		if (buffer_uptodate(bh)) {
3141 			unlock_buffer(bh);
3142 			continue;
3143 		}
3144 
3145 		bh->b_end_io = end_buffer_read_sync;
3146 		get_bh(bh);
3147 		submit_bh(REQ_OP_READ | op_flags, bh);
3148 	}
3149 }
3150 EXPORT_SYMBOL(__bh_read_batch);
3151 
3152 void __init buffer_init(void)
3153 {
3154 	unsigned long nrpages;
3155 	int ret;
3156 
3157 	bh_cachep = KMEM_CACHE(buffer_head,
3158 				SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3159 	/*
3160 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3161 	 */
3162 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3163 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3164 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3165 					NULL, buffer_exit_cpu_dead);
3166 	WARN_ON(ret < 0);
3167 }
3168