xref: /linux/fs/buffer.c (revision f694f30e81c4ade358eb8c75273bac1a48f0cb8f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/buffer.c
4  *
5  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
6  */
7 
8 /*
9  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10  *
11  * Removed a lot of unnecessary code and simplified things now that
12  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13  *
14  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
15  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
16  *
17  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18  *
19  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
20  */
21 
22 #include <linux/kernel.h>
23 #include <linux/sched/signal.h>
24 #include <linux/syscalls.h>
25 #include <linux/fs.h>
26 #include <linux/iomap.h>
27 #include <linux/mm.h>
28 #include <linux/percpu.h>
29 #include <linux/slab.h>
30 #include <linux/capability.h>
31 #include <linux/blkdev.h>
32 #include <linux/file.h>
33 #include <linux/quotaops.h>
34 #include <linux/highmem.h>
35 #include <linux/export.h>
36 #include <linux/backing-dev.h>
37 #include <linux/writeback.h>
38 #include <linux/hash.h>
39 #include <linux/suspend.h>
40 #include <linux/buffer_head.h>
41 #include <linux/task_io_accounting_ops.h>
42 #include <linux/bio.h>
43 #include <linux/cpu.h>
44 #include <linux/bitops.h>
45 #include <linux/mpage.h>
46 #include <linux/bit_spinlock.h>
47 #include <linux/pagevec.h>
48 #include <linux/sched/mm.h>
49 #include <trace/events/block.h>
50 #include <linux/fscrypt.h>
51 #include <linux/fsverity.h>
52 #include <linux/sched/isolation.h>
53 
54 #include "internal.h"
55 
56 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
57 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
58 			  enum rw_hint hint, struct writeback_control *wbc);
59 
60 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
61 
62 inline void touch_buffer(struct buffer_head *bh)
63 {
64 	trace_block_touch_buffer(bh);
65 	folio_mark_accessed(bh->b_folio);
66 }
67 EXPORT_SYMBOL(touch_buffer);
68 
69 void __lock_buffer(struct buffer_head *bh)
70 {
71 	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
72 }
73 EXPORT_SYMBOL(__lock_buffer);
74 
75 void unlock_buffer(struct buffer_head *bh)
76 {
77 	clear_bit_unlock(BH_Lock, &bh->b_state);
78 	smp_mb__after_atomic();
79 	wake_up_bit(&bh->b_state, BH_Lock);
80 }
81 EXPORT_SYMBOL(unlock_buffer);
82 
83 /*
84  * Returns whether the folio has dirty or writeback buffers. If all the buffers
85  * are unlocked and clean then the folio_test_dirty information is stale. If
86  * any of the buffers are locked, it is assumed they are locked for IO.
87  */
88 void buffer_check_dirty_writeback(struct folio *folio,
89 				     bool *dirty, bool *writeback)
90 {
91 	struct buffer_head *head, *bh;
92 	*dirty = false;
93 	*writeback = false;
94 
95 	BUG_ON(!folio_test_locked(folio));
96 
97 	head = folio_buffers(folio);
98 	if (!head)
99 		return;
100 
101 	if (folio_test_writeback(folio))
102 		*writeback = true;
103 
104 	bh = head;
105 	do {
106 		if (buffer_locked(bh))
107 			*writeback = true;
108 
109 		if (buffer_dirty(bh))
110 			*dirty = true;
111 
112 		bh = bh->b_this_page;
113 	} while (bh != head);
114 }
115 
116 /*
117  * Block until a buffer comes unlocked.  This doesn't stop it
118  * from becoming locked again - you have to lock it yourself
119  * if you want to preserve its state.
120  */
121 void __wait_on_buffer(struct buffer_head * bh)
122 {
123 	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
124 }
125 EXPORT_SYMBOL(__wait_on_buffer);
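
/*
 * An editorial sketch (not part of the original file): callers that only
 * need in-flight I/O to finish can use wait_on_buffer(), but callers that
 * then depend on the buffer's state should take the lock themselves:
 *
 *	lock_buffer(bh);		// sleeps until we own BH_Lock
 *	if (buffer_uptodate(bh)) {
 *		// state cannot change while we hold the lock
 *	}
 *	unlock_buffer(bh);
 */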
126 
127 static void buffer_io_error(struct buffer_head *bh, char *msg)
128 {
129 	if (!test_bit(BH_Quiet, &bh->b_state))
130 		printk_ratelimited(KERN_ERR
131 			"Buffer I/O error on dev %pg, logical block %llu%s\n",
132 			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
133 }
134 
135 /*
136  * End-of-IO handler helper function which does not touch the bh after
137  * unlocking it.
138  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
139  * a race there is benign: unlock_buffer() only uses the bh's address for
140  * hashing after unlocking the buffer, so it doesn't actually touch the bh
141  * itself.
142  */
143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
144 {
145 	if (uptodate) {
146 		set_buffer_uptodate(bh);
147 	} else {
148 		/* This happens due to failed read-ahead attempts. */
149 		clear_buffer_uptodate(bh);
150 	}
151 	unlock_buffer(bh);
152 }
153 
154 /*
155  * Default synchronous end-of-IO handler.  Just mark it up-to-date and
156  * unlock the buffer.
157  */
158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
159 {
160 	__end_buffer_read_notouch(bh, uptodate);
161 	put_bh(bh);
162 }
163 EXPORT_SYMBOL(end_buffer_read_sync);
164 
165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
166 {
167 	if (uptodate) {
168 		set_buffer_uptodate(bh);
169 	} else {
170 		buffer_io_error(bh, ", lost sync page write");
171 		mark_buffer_write_io_error(bh);
172 		clear_buffer_uptodate(bh);
173 	}
174 	unlock_buffer(bh);
175 	put_bh(bh);
176 }
177 EXPORT_SYMBOL(end_buffer_write_sync);
178 
179 /*
180  * Various filesystems appear to want __find_get_block to be non-blocking.
181  * But it's the page lock which protects the buffers.  To get around this,
182  * we get exclusion from try_to_free_buffers with the blockdev mapping's
183  * i_private_lock.
184  *
185  * Hack idea: for the blockdev mapping, i_private_lock contention
186  * may be quite high.  This code could TryLock the page, and if that
187  * succeeds, there is no need to take i_private_lock.
188  */
189 static struct buffer_head *
190 __find_get_block_slow(struct block_device *bdev, sector_t block)
191 {
192 	struct address_space *bd_mapping = bdev->bd_mapping;
193 	const int blkbits = bd_mapping->host->i_blkbits;
194 	struct buffer_head *ret = NULL;
195 	pgoff_t index;
196 	struct buffer_head *bh;
197 	struct buffer_head *head;
198 	struct folio *folio;
199 	int all_mapped = 1;
200 	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
201 
202 	index = ((loff_t)block << blkbits) / PAGE_SIZE;
203 	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
204 	if (IS_ERR(folio))
205 		goto out;
206 
207 	spin_lock(&bd_mapping->i_private_lock);
208 	head = folio_buffers(folio);
209 	if (!head)
210 		goto out_unlock;
211 	bh = head;
212 	do {
213 		if (!buffer_mapped(bh))
214 			all_mapped = 0;
215 		else if (bh->b_blocknr == block) {
216 			ret = bh;
217 			get_bh(bh);
218 			goto out_unlock;
219 		}
220 		bh = bh->b_this_page;
221 	} while (bh != head);
222 
223 	/* We might be here because some of the buffers on this page are
224 	 * not mapped.  This is due to various races between
225 	 * file I/O on the block device and getblk.  It gets dealt with
226 	 * elsewhere; don't complain if we had some unmapped buffers.
227 	 */
228 	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
229 	if (all_mapped && __ratelimit(&last_warned)) {
230 		printk("__find_get_block_slow() failed. block=%llu, "
231 		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
232 		       "device %pg blocksize: %d\n",
233 		       (unsigned long long)block,
234 		       (unsigned long long)bh->b_blocknr,
235 		       bh->b_state, bh->b_size, bdev,
236 		       1 << blkbits);
237 	}
238 out_unlock:
239 	spin_unlock(&bd_mapping->i_private_lock);
240 	folio_put(folio);
241 out:
242 	return ret;
243 }
244 
245 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
246 {
247 	unsigned long flags;
248 	struct buffer_head *first;
249 	struct buffer_head *tmp;
250 	struct folio *folio;
251 	int folio_uptodate = 1;
252 
253 	BUG_ON(!buffer_async_read(bh));
254 
255 	folio = bh->b_folio;
256 	if (uptodate) {
257 		set_buffer_uptodate(bh);
258 	} else {
259 		clear_buffer_uptodate(bh);
260 		buffer_io_error(bh, ", async page read");
261 	}
262 
263 	/*
264 	 * Be _very_ careful from here on. Bad things can happen if
265 	 * two buffer heads end IO at almost the same time and both
266 	 * decide that the page is now completely done.
267 	 */
268 	first = folio_buffers(folio);
269 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
270 	clear_buffer_async_read(bh);
271 	unlock_buffer(bh);
272 	tmp = bh;
273 	do {
274 		if (!buffer_uptodate(tmp))
275 			folio_uptodate = 0;
276 		if (buffer_async_read(tmp)) {
277 			BUG_ON(!buffer_locked(tmp));
278 			goto still_busy;
279 		}
280 		tmp = tmp->b_this_page;
281 	} while (tmp != bh);
282 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
283 
284 	folio_end_read(folio, folio_uptodate);
285 	return;
286 
287 still_busy:
288 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
289 	return;
290 }
291 
292 struct postprocess_bh_ctx {
293 	struct work_struct work;
294 	struct buffer_head *bh;
295 };
296 
297 static void verify_bh(struct work_struct *work)
298 {
299 	struct postprocess_bh_ctx *ctx =
300 		container_of(work, struct postprocess_bh_ctx, work);
301 	struct buffer_head *bh = ctx->bh;
302 	bool valid;
303 
304 	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
305 	end_buffer_async_read(bh, valid);
306 	kfree(ctx);
307 }
308 
309 static bool need_fsverity(struct buffer_head *bh)
310 {
311 	struct folio *folio = bh->b_folio;
312 	struct inode *inode = folio->mapping->host;
313 
314 	return fsverity_active(inode) &&
315 		/* needed by ext4 */
316 		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
317 }
318 
319 static void decrypt_bh(struct work_struct *work)
320 {
321 	struct postprocess_bh_ctx *ctx =
322 		container_of(work, struct postprocess_bh_ctx, work);
323 	struct buffer_head *bh = ctx->bh;
324 	int err;
325 
326 	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
327 					       bh_offset(bh));
328 	if (err == 0 && need_fsverity(bh)) {
329 		/*
330 		 * We use different work queues for decryption and for verity
331 		 * because verity may require reading metadata pages that need
332 		 * decryption, and we shouldn't recurse to the same workqueue.
333 		 */
334 		INIT_WORK(&ctx->work, verify_bh);
335 		fsverity_enqueue_verify_work(&ctx->work);
336 		return;
337 	}
338 	end_buffer_async_read(bh, err == 0);
339 	kfree(ctx);
340 }
341 
342 /*
343  * I/O completion handler for block_read_full_folio() - pages
344  * which come unlocked at the end of I/O.
345  */
346 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
347 {
348 	struct inode *inode = bh->b_folio->mapping->host;
349 	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
350 	bool verify = need_fsverity(bh);
351 
352 	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
353 	if (uptodate && (decrypt || verify)) {
354 		struct postprocess_bh_ctx *ctx =
355 			kmalloc(sizeof(*ctx), GFP_ATOMIC);
356 
357 		if (ctx) {
358 			ctx->bh = bh;
359 			if (decrypt) {
360 				INIT_WORK(&ctx->work, decrypt_bh);
361 				fscrypt_enqueue_decrypt_work(&ctx->work);
362 			} else {
363 				INIT_WORK(&ctx->work, verify_bh);
364 				fsverity_enqueue_verify_work(&ctx->work);
365 			}
366 			return;
367 		}
368 		uptodate = 0;
369 	}
370 	end_buffer_async_read(bh, uptodate);
371 }
372 
373 /*
374  * Completion handler for block_write_full_folio() - folios which are unlocked
375  * during I/O, and which have the writeback flag cleared upon I/O completion.
376  */
377 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
378 {
379 	unsigned long flags;
380 	struct buffer_head *first;
381 	struct buffer_head *tmp;
382 	struct folio *folio;
383 
384 	BUG_ON(!buffer_async_write(bh));
385 
386 	folio = bh->b_folio;
387 	if (uptodate) {
388 		set_buffer_uptodate(bh);
389 	} else {
390 		buffer_io_error(bh, ", lost async page write");
391 		mark_buffer_write_io_error(bh);
392 		clear_buffer_uptodate(bh);
393 	}
394 
395 	first = folio_buffers(folio);
396 	spin_lock_irqsave(&first->b_uptodate_lock, flags);
397 
398 	clear_buffer_async_write(bh);
399 	unlock_buffer(bh);
400 	tmp = bh->b_this_page;
401 	while (tmp != bh) {
402 		if (buffer_async_write(tmp)) {
403 			BUG_ON(!buffer_locked(tmp));
404 			goto still_busy;
405 		}
406 		tmp = tmp->b_this_page;
407 	}
408 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
409 	folio_end_writeback(folio);
410 	return;
411 
412 still_busy:
413 	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
414 	return;
415 }
416 
417 /*
418  * If a page's buffers are under async read-in (end_buffer_async_read
419  * completion) then there is a possibility that another thread of
420  * control could lock one of the buffers after it has completed
421  * but while some of the other buffers have not completed.  This
422  * locked buffer would confuse end_buffer_async_read() into not unlocking
423  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
424  * that this buffer is not under async I/O.
425  *
426  * The page comes unlocked when it has no locked buffer_async buffers
427  * left.
428  *
429  * PageLocked prevents anyone from starting new async I/O against any of
430  * the buffers.
431  *
432  * PageWriteback is used to prevent simultaneous writeout of the same
433  * page.
434  *
435  * PageLocked prevents anyone from starting writeback of a page which is
436  * under read I/O (PageWriteback is only ever set against a locked page).
437  */
438 static void mark_buffer_async_read(struct buffer_head *bh)
439 {
440 	bh->b_end_io = end_buffer_async_read_io;
441 	set_buffer_async_read(bh);
442 }
443 
444 static void mark_buffer_async_write_endio(struct buffer_head *bh,
445 					  bh_end_io_t *handler)
446 {
447 	bh->b_end_io = handler;
448 	set_buffer_async_write(bh);
449 }
450 
451 void mark_buffer_async_write(struct buffer_head *bh)
452 {
453 	mark_buffer_async_write_endio(bh, end_buffer_async_write);
454 }
455 EXPORT_SYMBOL(mark_buffer_async_write);
456 
457 
458 /*
459  * fs/buffer.c contains helper functions for buffer-backed address space's
460  * fsync functions.  A common requirement for buffer-based filesystems is
461  * that certain data from the backing blockdev needs to be written out for
462  * a successful fsync().  For example, ext2 indirect blocks need to be
463  * written back and waited upon before fsync() returns.
464  *
465  * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
466  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
467  * management of a list of dependent buffers at ->i_mapping->i_private_list.
468  *
469  * Locking is a little subtle: try_to_free_buffers() will remove buffers
470  * from their controlling inode's queue when they are being freed.  But
471  * try_to_free_buffers() will be operating against the *blockdev* mapping
472  * at the time, not against the S_ISREG file which depends on those buffers.
473  * So the locking for i_private_list is via the i_private_lock in the address_space
474  * which backs the buffers.  Which is different from the address_space
475  * against which the buffers are listed.  So for a particular address_space,
476  * mapping->i_private_lock does *not* protect mapping->i_private_list!  In fact,
477  * mapping->i_private_list will always be protected by the backing blockdev's
478  * ->i_private_lock.
479  *
480  * Which introduces a requirement: all buffers on an address_space's
481  * ->i_private_list must be from the same address_space: the blockdev's.
482  *
483  * address_spaces which do not place buffers at ->i_private_list via these
484  * utility functions are free to use i_private_lock and i_private_list for
485  * whatever they want.  The only requirement is that list_empty(i_private_list)
486  * be true at clear_inode() time.
487  *
488  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
489  * filesystems should do that.  invalidate_inode_buffers() should just go
490  * BUG_ON(!list_empty).
491  *
492  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
493  * take an address_space, not an inode.  And it should be called
494  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
495  * queued up.
496  *
497  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
498  * list if it is already on a list.  Because if the buffer is on a list,
499  * it *must* already be on the right one.  If not, the filesystem is being
500  * silly.  This will save a ton of locking.  But first we have to ensure
501  * that buffers are taken *off* the old inode's list when they are freed
502  * (presumably in truncate).  That requires careful auditing of all
503  * filesystems (do it inside bforget()).  It could also be done by bringing
504  * b_inode back.
505  */
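
/*
 * An editorial sketch of the intended use (hypothetical myfs code): the
 * filesystem queues dependent metadata buffers as it dirties them, and
 * its fsync path writes out the i_private_list via sync_mapping_buffers():
 *
 *	// while modifying, e.g., an indirect block backing @inode:
 *	mark_buffer_dirty_inode(bh, inode);
 *
 *	// later, in myfs's fsync path:
 *	int err = sync_mapping_buffers(inode->i_mapping);
 */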
506 
507 /*
508  * The buffer's backing address_space's i_private_lock must be held
509  */
510 static void __remove_assoc_queue(struct buffer_head *bh)
511 {
512 	list_del_init(&bh->b_assoc_buffers);
513 	WARN_ON(!bh->b_assoc_map);
514 	bh->b_assoc_map = NULL;
515 }
516 
517 int inode_has_buffers(struct inode *inode)
518 {
519 	return !list_empty(&inode->i_data.i_private_list);
520 }
521 
522 /*
523  * osync is designed to support O_SYNC I/O.  It waits synchronously for
524  * all already-submitted IO to complete, but does not queue any new
525  * writes to the disk.
526  *
527  * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
528  * as you dirty the buffers, and then use osync_inode_buffers to wait for
529  * completion.  Any other dirty buffers which are not yet queued for
530  * write will not be flushed to disk by the osync.
531  */
532 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
533 {
534 	struct buffer_head *bh;
535 	struct list_head *p;
536 	int err = 0;
537 
538 	spin_lock(lock);
539 repeat:
540 	list_for_each_prev(p, list) {
541 		bh = BH_ENTRY(p);
542 		if (buffer_locked(bh)) {
543 			get_bh(bh);
544 			spin_unlock(lock);
545 			wait_on_buffer(bh);
546 			if (!buffer_uptodate(bh))
547 				err = -EIO;
548 			brelse(bh);
549 			spin_lock(lock);
550 			goto repeat;
551 		}
552 	}
553 	spin_unlock(lock);
554 	return err;
555 }
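
/*
 * An editorial sketch of the O_SYNC pattern described above, using this
 * file's own helpers: queue each write as the buffer is dirtied, then
 * wait once at the end:
 *
 *	write_dirty_buffer(bh, REQ_SYNC);	// submit now, don't wait
 *	...
 *	err = osync_buffers_list(lock, list);	// wait for what was queued
 */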
556 
557 /**
558  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
559  * @mapping: the mapping which wants those buffers written
560  *
561  * Starts I/O against the buffers at mapping->i_private_list, and waits upon
562  * that I/O.
563  *
564  * Basically, this is a convenience function for fsync().
565  * @mapping is a file or directory which needs those buffers to be written for
566  * a successful fsync().
567  */
568 int sync_mapping_buffers(struct address_space *mapping)
569 {
570 	struct address_space *buffer_mapping = mapping->i_private_data;
571 
572 	if (buffer_mapping == NULL || list_empty(&mapping->i_private_list))
573 		return 0;
574 
575 	return fsync_buffers_list(&buffer_mapping->i_private_lock,
576 					&mapping->i_private_list);
577 }
578 EXPORT_SYMBOL(sync_mapping_buffers);
579 
580 /**
581  * generic_buffers_fsync_noflush - generic buffer fsync implementation
582  * for simple filesystems with no inode lock
583  *
584  * @file:	file to synchronize
585  * @start:	start offset in bytes
586  * @end:	end offset in bytes (inclusive)
587  * @datasync:	only synchronize essential metadata if true
588  *
589  * This is a generic implementation of the fsync method for simple
590  * filesystems which track all non-inode metadata in the buffers list
591  * hanging off the address_space structure.
592  */
593 int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
594 				  bool datasync)
595 {
596 	struct inode *inode = file->f_mapping->host;
597 	int err;
598 	int ret;
599 
600 	err = file_write_and_wait_range(file, start, end);
601 	if (err)
602 		return err;
603 
604 	ret = sync_mapping_buffers(inode->i_mapping);
605 	if (!(inode->i_state & I_DIRTY_ALL))
606 		goto out;
607 	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
608 		goto out;
609 
610 	err = sync_inode_metadata(inode, 1);
611 	if (ret == 0)
612 		ret = err;
613 
614 out:
615 	/* check and advance again to catch errors after syncing out buffers */
616 	err = file_check_and_advance_wb_err(file);
617 	if (ret == 0)
618 		ret = err;
619 	return ret;
620 }
621 EXPORT_SYMBOL(generic_buffers_fsync_noflush);
622 
623 /**
624  * generic_buffers_fsync - generic buffer fsync implementation
625  * for simple filesystems with no inode lock
626  *
627  * @file:	file to synchronize
628  * @start:	start offset in bytes
629  * @end:	end offset in bytes (inclusive)
630  * @datasync:	only synchronize essential metadata if true
631  *
632  * This is a generic implementation of the fsync method for simple
633  * filesystems which track all non-inode metadata in the buffers list
634  * hanging off the address_space structure. This also makes sure that
635  * a device cache flush operation is called at the end.
636  */
637 int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
638 			  bool datasync)
639 {
640 	struct inode *inode = file->f_mapping->host;
641 	int ret;
642 
643 	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
644 	if (!ret)
645 		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
646 	return ret;
647 }
648 EXPORT_SYMBOL(generic_buffers_fsync);
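
/*
 * An editorial sketch (hypothetical myfs names) of wiring this into a
 * simple filesystem:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 *	static const struct file_operations myfs_file_operations = {
 *		.fsync		= myfs_fsync,
 *	};
 */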
649 
650 /*
651  * Called when we've recently written block `bblock', and it is known that
652  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
653  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
654  * dirty, schedule it for IO, so that indirects merge nicely with their data.
655  */
656 void write_boundary_block(struct block_device *bdev,
657 			sector_t bblock, unsigned blocksize)
658 {
659 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
660 	if (bh) {
661 		if (buffer_dirty(bh))
662 			write_dirty_buffer(bh, 0);
663 		put_bh(bh);
664 	}
665 }
666 
667 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
668 {
669 	struct address_space *mapping = inode->i_mapping;
670 	struct address_space *buffer_mapping = bh->b_folio->mapping;
671 
672 	mark_buffer_dirty(bh);
673 	if (!mapping->i_private_data) {
674 		mapping->i_private_data = buffer_mapping;
675 	} else {
676 		BUG_ON(mapping->i_private_data != buffer_mapping);
677 	}
678 	if (!bh->b_assoc_map) {
679 		spin_lock(&buffer_mapping->i_private_lock);
680 		list_move_tail(&bh->b_assoc_buffers,
681 				&mapping->i_private_list);
682 		bh->b_assoc_map = mapping;
683 		spin_unlock(&buffer_mapping->i_private_lock);
684 	}
685 }
686 EXPORT_SYMBOL(mark_buffer_dirty_inode);
687 
688 /**
689  * block_dirty_folio - Mark a folio as dirty.
690  * @mapping: The address space containing this folio.
691  * @folio: The folio to mark dirty.
692  *
693  * Filesystems which use buffer_heads can use this function as their
694  * ->dirty_folio implementation.  Some filesystems need to do a little
695  * work before calling this function.  Filesystems which do not use
696  * buffer_heads should call filemap_dirty_folio() instead.
697  *
698  * If the folio has buffers, the uptodate buffers are set dirty, to
699  * preserve dirty-state coherency between the folio and the buffers.
700  * Buffers added to a dirty folio are created dirty.
701  *
702  * The buffers are dirtied before the folio is dirtied.  There's a small
703  * race window in which writeback may see the folio cleanness but not the
704  * buffer dirtiness.  That's fine.  If this code were to set the folio
705  * dirty before the buffers, writeback could clear the folio dirty flag,
706  * see a bunch of clean buffers and we'd end up with dirty buffers/clean
707  * folio on the dirty folio list.
708  *
709  * We use i_private_lock to lock against try_to_free_buffers() while
710  * using the folio's buffer list.  This also prevents clean buffers
711  * being added to the folio after it was set dirty.
712  *
713  * Context: May only be called from process context.  Does not sleep.
714  * Caller must ensure that @folio cannot be truncated during this call,
715  * typically by holding the folio lock or having a page in the folio
716  * mapped and holding the page table lock.
717  *
718  * Return: True if the folio was dirtied; false if it was already dirtied.
719  */
720 bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
721 {
722 	struct buffer_head *head;
723 	bool newly_dirty;
724 
725 	spin_lock(&mapping->i_private_lock);
726 	head = folio_buffers(folio);
727 	if (head) {
728 		struct buffer_head *bh = head;
729 
730 		do {
731 			set_buffer_dirty(bh);
732 			bh = bh->b_this_page;
733 		} while (bh != head);
734 	}
735 	/*
736 	 * Lock out page's memcg migration to keep PageDirty
737 	 * synchronized with per-memcg dirty page counters.
738 	 */
739 	newly_dirty = !folio_test_set_dirty(folio);
740 	spin_unlock(&mapping->i_private_lock);
741 
742 	if (newly_dirty)
743 		__folio_mark_dirty(folio, mapping, 1);
744 
745 	if (newly_dirty)
746 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
747 
748 	return newly_dirty;
749 }
750 EXPORT_SYMBOL(block_dirty_folio);
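
/*
 * An editorial sketch (hypothetical myfs_aops) of using this as a
 * buffer-head based filesystem's dirty_folio implementation:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *	};
 */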
751 
752 /*
753  * Write out and wait upon a list of buffers.
754  *
755  * We have conflicting pressures: we want to make sure that all
756  * initially dirty buffers get waited on, but that any subsequently
757  * dirtied buffers don't.  After all, we don't want fsync to last
758  * forever if somebody is actively writing to the file.
759  *
760  * Do this in two main stages: first we copy dirty buffers to a
761  * temporary inode list, queueing the writes as we go.  Then we clean
762  * up, waiting for those writes to complete.
763  *
764  * During this second stage, any subsequent updates to the file may end
765  * up refiling the buffer on the original inode's dirty list again, so
766  * there is a chance we will end up with a buffer queued for write but
767  * not yet completed on that list.  So, as a final cleanup we go through
768  * the osync code to catch these locked, dirty buffers without requeuing
769  * any newly dirty buffers for write.
770  */
771 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
772 {
773 	struct buffer_head *bh;
774 	struct address_space *mapping;
775 	int err = 0, err2;
776 	struct blk_plug plug;
777 	LIST_HEAD(tmp);
778 
779 	blk_start_plug(&plug);
780 
781 	spin_lock(lock);
782 	while (!list_empty(list)) {
783 		bh = BH_ENTRY(list->next);
784 		mapping = bh->b_assoc_map;
785 		__remove_assoc_queue(bh);
786 		/* Avoid race with mark_buffer_dirty_inode() which does
787 		 * a lockless check and we rely on seeing the dirty bit */
788 		smp_mb();
789 		if (buffer_dirty(bh) || buffer_locked(bh)) {
790 			list_add(&bh->b_assoc_buffers, &tmp);
791 			bh->b_assoc_map = mapping;
792 			if (buffer_dirty(bh)) {
793 				get_bh(bh);
794 				spin_unlock(lock);
795 				/*
796 				 * Ensure any pending I/O completes so that
797 				 * write_dirty_buffer() actually writes the
798 				 * current contents - it is a noop if I/O is
799 				 * still in flight on potentially older
800 				 * contents.
801 				 */
802 				write_dirty_buffer(bh, REQ_SYNC);
803 
804 				/*
805 				 * Kick off IO for the previous mapping. Note
806 				 * that we will not run the very last mapping,
807 				 * wait_on_buffer() will do that for us
808 				 * through sync_buffer().
809 				 */
810 				brelse(bh);
811 				spin_lock(lock);
812 			}
813 		}
814 	}
815 
816 	spin_unlock(lock);
817 	blk_finish_plug(&plug);
818 	spin_lock(lock);
819 
820 	while (!list_empty(&tmp)) {
821 		bh = BH_ENTRY(tmp.prev);
822 		get_bh(bh);
823 		mapping = bh->b_assoc_map;
824 		__remove_assoc_queue(bh);
825 		/* Avoid race with mark_buffer_dirty_inode() which does
826 		 * a lockless check and we rely on seeing the dirty bit */
827 		smp_mb();
828 		if (buffer_dirty(bh)) {
829 			list_add(&bh->b_assoc_buffers,
830 				 &mapping->i_private_list);
831 			bh->b_assoc_map = mapping;
832 		}
833 		spin_unlock(lock);
834 		wait_on_buffer(bh);
835 		if (!buffer_uptodate(bh))
836 			err = -EIO;
837 		brelse(bh);
838 		spin_lock(lock);
839 	}
840 
841 	spin_unlock(lock);
842 	err2 = osync_buffers_list(lock, list);
843 	if (err)
844 		return err;
845 	else
846 		return err2;
847 }
848 
849 /*
850  * Invalidate any and all dirty buffers on a given inode.  We are
851  * probably unmounting the fs, but that doesn't mean we have already
852  * done a sync().  Just drop the buffers from the inode list.
853  *
854  * NOTE: we take the inode's blockdev's mapping's i_private_lock.  Which
855  * assumes that all the buffers are against the blockdev.
856  */
857 void invalidate_inode_buffers(struct inode *inode)
858 {
859 	if (inode_has_buffers(inode)) {
860 		struct address_space *mapping = &inode->i_data;
861 		struct list_head *list = &mapping->i_private_list;
862 		struct address_space *buffer_mapping = mapping->i_private_data;
863 
864 		spin_lock(&buffer_mapping->i_private_lock);
865 		while (!list_empty(list))
866 			__remove_assoc_queue(BH_ENTRY(list->next));
867 		spin_unlock(&buffer_mapping->i_private_lock);
868 	}
869 }
870 EXPORT_SYMBOL(invalidate_inode_buffers);
871 
872 /*
873  * Remove any clean buffers from the inode's buffer list.  This is called
874  * when we're trying to free the inode itself.  Those buffers can pin it.
875  *
876  * Returns true if all buffers were removed.
877  */
878 int remove_inode_buffers(struct inode *inode)
879 {
880 	int ret = 1;
881 
882 	if (inode_has_buffers(inode)) {
883 		struct address_space *mapping = &inode->i_data;
884 		struct list_head *list = &mapping->i_private_list;
885 		struct address_space *buffer_mapping = mapping->i_private_data;
886 
887 		spin_lock(&buffer_mapping->i_private_lock);
888 		while (!list_empty(list)) {
889 			struct buffer_head *bh = BH_ENTRY(list->next);
890 			if (buffer_dirty(bh)) {
891 				ret = 0;
892 				break;
893 			}
894 			__remove_assoc_queue(bh);
895 		}
896 		spin_unlock(&buffer_mapping->i_private_lock);
897 	}
898 	return ret;
899 }
900 
901 /*
902  * Create the appropriate buffers when given a folio for the data area and
903  * the size of each buffer.  Use the bh->b_this_page linked list to
904  * follow the buffers created.  Return NULL if unable to create more
905  * buffers.
906  *
907  * The gfp flags are used to differentiate async IO (paging, swapping),
908  * which may not fail, from ordinary buffer allocations.
909  */
910 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
911 					gfp_t gfp)
912 {
913 	struct buffer_head *bh, *head;
914 	long offset;
915 	struct mem_cgroup *memcg, *old_memcg;
916 
917 	/* The folio lock pins the memcg */
918 	memcg = folio_memcg(folio);
919 	old_memcg = set_active_memcg(memcg);
920 
921 	head = NULL;
922 	offset = folio_size(folio);
923 	while ((offset -= size) >= 0) {
924 		bh = alloc_buffer_head(gfp);
925 		if (!bh)
926 			goto no_grow;
927 
928 		bh->b_this_page = head;
929 		bh->b_blocknr = -1;
930 		head = bh;
931 
932 		bh->b_size = size;
933 
934 		/* Link the buffer to its folio */
935 		folio_set_bh(bh, folio, offset);
936 	}
937 out:
938 	set_active_memcg(old_memcg);
939 	return head;
940 /*
941  * In case anything failed, we just free everything we got.
942  */
943 no_grow:
944 	if (head) {
945 		do {
946 			bh = head;
947 			head = head->b_this_page;
948 			free_buffer_head(bh);
949 		} while (head);
950 	}
951 
952 	goto out;
953 }
954 EXPORT_SYMBOL_GPL(folio_alloc_buffers);
955 
956 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size)
957 {
958 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
959 
960 	return folio_alloc_buffers(page_folio(page), size, gfp);
961 }
962 EXPORT_SYMBOL_GPL(alloc_page_buffers);
963 
964 static inline void link_dev_buffers(struct folio *folio,
965 		struct buffer_head *head)
966 {
967 	struct buffer_head *bh, *tail;
968 
969 	bh = head;
970 	do {
971 		tail = bh;
972 		bh = bh->b_this_page;
973 	} while (bh);
974 	tail->b_this_page = head;
975 	folio_attach_private(folio, head);
976 }
977 
978 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
979 {
980 	sector_t retval = ~((sector_t)0);
981 	loff_t sz = bdev_nr_bytes(bdev);
982 
983 	if (sz) {
984 		unsigned int sizebits = blksize_bits(size);
985 		retval = (sz >> sizebits);
986 	}
987 	return retval;
988 }
989 
990 /*
991  * Initialise the state of a blockdev folio's buffers.
992  */
993 static sector_t folio_init_buffers(struct folio *folio,
994 		struct block_device *bdev, unsigned size)
995 {
996 	struct buffer_head *head = folio_buffers(folio);
997 	struct buffer_head *bh = head;
998 	bool uptodate = folio_test_uptodate(folio);
999 	sector_t block = div_u64(folio_pos(folio), size);
1000 	sector_t end_block = blkdev_max_block(bdev, size);
1001 
1002 	do {
1003 		if (!buffer_mapped(bh)) {
1004 			bh->b_end_io = NULL;
1005 			bh->b_private = NULL;
1006 			bh->b_bdev = bdev;
1007 			bh->b_blocknr = block;
1008 			if (uptodate)
1009 				set_buffer_uptodate(bh);
1010 			if (block < end_block)
1011 				set_buffer_mapped(bh);
1012 		}
1013 		block++;
1014 		bh = bh->b_this_page;
1015 	} while (bh != head);
1016 
1017 	/*
1018 	 * Caller needs to validate requested block against end of device.
1019 	 */
1020 	return end_block;
1021 }
1022 
1023 /*
1024  * Create the page-cache folio that contains the requested block.
1025  *
1026  * This is used purely for blockdev mappings.
1027  *
1028  * Returns false if we have a failure which cannot be cured by retrying
1029  * without sleeping.  Returns true if we succeeded, or the caller should retry.
1030  */
1031 static bool grow_dev_folio(struct block_device *bdev, sector_t block,
1032 		pgoff_t index, unsigned size, gfp_t gfp)
1033 {
1034 	struct address_space *mapping = bdev->bd_mapping;
1035 	struct folio *folio;
1036 	struct buffer_head *bh;
1037 	sector_t end_block = 0;
1038 
1039 	folio = __filemap_get_folio(mapping, index,
1040 			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
1041 	if (IS_ERR(folio))
1042 		return false;
1043 
1044 	bh = folio_buffers(folio);
1045 	if (bh) {
1046 		if (bh->b_size == size) {
1047 			end_block = folio_init_buffers(folio, bdev, size);
1048 			goto unlock;
1049 		}
1050 
1051 		/*
1052 		 * Retrying may succeed; for example the folio may finish
1053 		 * writeback, or buffers may be cleaned.  This should not
1054 		 * happen very often; maybe we have old buffers attached to
1055 		 * this blockdev's page cache and we're trying to change
1056 		 * the block size?
1057 		 */
1058 		if (!try_to_free_buffers(folio)) {
1059 			end_block = ~0ULL;
1060 			goto unlock;
1061 		}
1062 	}
1063 
1064 	bh = folio_alloc_buffers(folio, size, gfp | __GFP_ACCOUNT);
1065 	if (!bh)
1066 		goto unlock;
1067 
1068 	/*
1069 	 * Link the folio to the buffers and initialise them.  Take the
1070 	 * lock to be atomic wrt __find_get_block(), which does not
1071 	 * run under the folio lock.
1072 	 */
1073 	spin_lock(&mapping->i_private_lock);
1074 	link_dev_buffers(folio, bh);
1075 	end_block = folio_init_buffers(folio, bdev, size);
1076 	spin_unlock(&mapping->i_private_lock);
1077 unlock:
1078 	folio_unlock(folio);
1079 	folio_put(folio);
1080 	return block < end_block;
1081 }
1082 
1083 /*
1084  * Create buffers for the specified block device block's folio.  If
1085  * that folio was dirty, the buffers are set dirty also.  Returns false
1086  * if we've hit a permanent error.
1087  */
1088 static bool grow_buffers(struct block_device *bdev, sector_t block,
1089 		unsigned size, gfp_t gfp)
1090 {
1091 	loff_t pos;
1092 
1093 	/*
1094 	 * Check for a block which lies outside our maximum possible
1095 	 * pagecache index.
1096 	 */
1097 	if (check_mul_overflow(block, (sector_t)size, &pos) || pos > MAX_LFS_FILESIZE) {
1098 		printk(KERN_ERR "%s: requested out-of-range block %llu for device %pg\n",
1099 			__func__, (unsigned long long)block,
1100 			bdev);
1101 		return false;
1102 	}
1103 
1104 	/* Create a folio with the proper size buffers */
1105 	return grow_dev_folio(bdev, block, pos / PAGE_SIZE, size, gfp);
1106 }
1107 
1108 static struct buffer_head *
1109 __getblk_slow(struct block_device *bdev, sector_t block,
1110 	     unsigned size, gfp_t gfp)
1111 {
1112 	/* Size must be multiple of hard sectorsize */
1113 	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1114 			(size < 512 || size > PAGE_SIZE))) {
1115 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1116 					size);
1117 		printk(KERN_ERR "logical block size: %d\n",
1118 					bdev_logical_block_size(bdev));
1119 
1120 		dump_stack();
1121 		return NULL;
1122 	}
1123 
1124 	for (;;) {
1125 		struct buffer_head *bh;
1126 
1127 		bh = __find_get_block(bdev, block, size);
1128 		if (bh)
1129 			return bh;
1130 
1131 		if (!grow_buffers(bdev, block, size, gfp))
1132 			return NULL;
1133 	}
1134 }
1135 
1136 /*
1137  * The relationship between dirty buffers and dirty pages:
1138  *
1139  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1140  * the page is tagged dirty in the page cache.
1141  *
1142  * At all times, the dirtiness of the buffers represents the dirtiness of
1143  * subsections of the page.  If the page has buffers, the page dirty bit is
1144  * merely a hint about the true dirty state.
1145  *
1146  * When a page is set dirty in its entirety, all its buffers are marked dirty
1147  * (if the page has buffers).
1148  *
1149  * When a buffer is marked dirty, its page is dirtied, but the page's other
1150  * buffers are not.
1151  *
1152  * Also.  When blockdev buffers are explicitly read with bread(), they
1153  * individually become uptodate.  But their backing page remains not
1154  * uptodate - even if all of its buffers are uptodate.  A subsequent
1155  * block_read_full_folio() against that folio will discover all the uptodate
1156  * buffers, will set the folio uptodate and will perform no I/O.
1157  */
1158 
1159 /**
1160  * mark_buffer_dirty - mark a buffer_head as needing writeout
1161  * @bh: the buffer_head to mark dirty
1162  *
1163  * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1164  * its backing page dirty, then tag the page as dirty in the page cache
1165  * and then attach the address_space's inode to its superblock's dirty
1166  * inode list.
1167  *
1168  * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->i_private_lock,
1169  * i_pages lock and mapping->host->i_lock.
1170  */
1171 void mark_buffer_dirty(struct buffer_head *bh)
1172 {
1173 	WARN_ON_ONCE(!buffer_uptodate(bh));
1174 
1175 	trace_block_dirty_buffer(bh);
1176 
1177 	/*
1178 	 * Very *carefully* optimize the it-is-already-dirty case.
1179 	 *
1180 	 * Don't let the final "is it dirty" escape to before we
1181 	 * perhaps modified the buffer.
1182 	 */
1183 	if (buffer_dirty(bh)) {
1184 		smp_mb();
1185 		if (buffer_dirty(bh))
1186 			return;
1187 	}
1188 
1189 	if (!test_set_buffer_dirty(bh)) {
1190 		struct folio *folio = bh->b_folio;
1191 		struct address_space *mapping = NULL;
1192 
1193 		if (!folio_test_set_dirty(folio)) {
1194 			mapping = folio->mapping;
1195 			if (mapping)
1196 				__folio_mark_dirty(folio, mapping, 0);
1197 		}
1198 		if (mapping)
1199 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1200 	}
1201 }
1202 EXPORT_SYMBOL(mark_buffer_dirty);
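
/*
 * A typical usage sketch (editorial): bring the buffer's contents up to
 * date under the lock, then mark it dirty so writeback picks it up:
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 */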
1203 
1204 void mark_buffer_write_io_error(struct buffer_head *bh)
1205 {
1206 	set_buffer_write_io_error(bh);
1207 	/* FIXME: do we need to set this in both places? */
1208 	if (bh->b_folio && bh->b_folio->mapping)
1209 		mapping_set_error(bh->b_folio->mapping, -EIO);
1210 	if (bh->b_assoc_map) {
1211 		mapping_set_error(bh->b_assoc_map, -EIO);
1212 		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
1213 	}
1214 }
1215 EXPORT_SYMBOL(mark_buffer_write_io_error);
1216 
1217 /**
1218  * __brelse - Release a buffer.
1219  * @bh: The buffer to release.
1220  *
1221  * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
1222  */
1223 void __brelse(struct buffer_head *bh)
1224 {
1225 	if (atomic_read(&bh->b_count)) {
1226 		put_bh(bh);
1227 		return;
1228 	}
1229 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1230 }
1231 EXPORT_SYMBOL(__brelse);
1232 
1233 /**
1234  * __bforget - Discard any dirty data in a buffer.
1235  * @bh: The buffer to forget.
1236  *
1237  * This variant of bforget() can be called if @bh is guaranteed to not
1238  * be NULL.
1239  */
1240 void __bforget(struct buffer_head *bh)
1241 {
1242 	clear_buffer_dirty(bh);
1243 	if (bh->b_assoc_map) {
1244 		struct address_space *buffer_mapping = bh->b_folio->mapping;
1245 
1246 		spin_lock(&buffer_mapping->i_private_lock);
1247 		list_del_init(&bh->b_assoc_buffers);
1248 		bh->b_assoc_map = NULL;
1249 		spin_unlock(&buffer_mapping->i_private_lock);
1250 	}
1251 	__brelse(bh);
1252 }
1253 EXPORT_SYMBOL(__bforget);
1254 
1255 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1256 {
1257 	lock_buffer(bh);
1258 	if (buffer_uptodate(bh)) {
1259 		unlock_buffer(bh);
1260 		return bh;
1261 	} else {
1262 		get_bh(bh);
1263 		bh->b_end_io = end_buffer_read_sync;
1264 		submit_bh(REQ_OP_READ, bh);
1265 		wait_on_buffer(bh);
1266 		if (buffer_uptodate(bh))
1267 			return bh;
1268 	}
1269 	brelse(bh);
1270 	return NULL;
1271 }
1272 
1273 /*
1274  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1275  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1276  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1277  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1278  * CPU's LRUs at the same time.
1279  *
1280  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1281  * sb_find_get_block().
1282  *
1283  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1284  * a local interrupt disable for that.
1285  */
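
/*
 * The caching is transparent to callers (an editorial sketch, assuming a
 * mounted super_block *sb): a repeated lookup of a hot block is served
 * from this CPU's array without touching the pagecache:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);	// slow path, fills LRU
 *	brelse(bh);
 *	bh = sb_find_get_block(sb, blocknr);	// likely hits lookup_bh_lru()
 *	if (bh)
 *		brelse(bh);
 */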
1286 
1287 #define BH_LRU_SIZE	16
1288 
1289 struct bh_lru {
1290 	struct buffer_head *bhs[BH_LRU_SIZE];
1291 };
1292 
1293 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1294 
1295 #ifdef CONFIG_SMP
1296 #define bh_lru_lock()	local_irq_disable()
1297 #define bh_lru_unlock()	local_irq_enable()
1298 #else
1299 #define bh_lru_lock()	preempt_disable()
1300 #define bh_lru_unlock()	preempt_enable()
1301 #endif
1302 
1303 static inline void check_irqs_on(void)
1304 {
1305 #ifdef irqs_disabled
1306 	BUG_ON(irqs_disabled());
1307 #endif
1308 }
1309 
1310 /*
1311  * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1312  * inserted at the front, and the buffer_head at the back, if any, is evicted.
1313  * Or, if already in the LRU it is moved to the front.
1314  */
1315 static void bh_lru_install(struct buffer_head *bh)
1316 {
1317 	struct buffer_head *evictee = bh;
1318 	struct bh_lru *b;
1319 	int i;
1320 
1321 	check_irqs_on();
1322 	bh_lru_lock();
1323 
1324 	/*
1325 	 * The refcount of a buffer_head in the bh_lru prevents dropping the
1326 	 * attached folio (i.e., try_to_free_buffers()), so it could make
1327 	 * page migration fail.
1328 	 * Skip putting the upcoming bh into the bh_lru until migration is done.
1329 	 */
1330 	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1331 		bh_lru_unlock();
1332 		return;
1333 	}
1334 
1335 	b = this_cpu_ptr(&bh_lrus);
1336 	for (i = 0; i < BH_LRU_SIZE; i++) {
1337 		swap(evictee, b->bhs[i]);
1338 		if (evictee == bh) {
1339 			bh_lru_unlock();
1340 			return;
1341 		}
1342 	}
1343 
1344 	get_bh(bh);
1345 	bh_lru_unlock();
1346 	brelse(evictee);
1347 }
1348 
1349 /*
1350  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1351  */
1352 static struct buffer_head *
1353 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1354 {
1355 	struct buffer_head *ret = NULL;
1356 	unsigned int i;
1357 
1358 	check_irqs_on();
1359 	bh_lru_lock();
1360 	if (cpu_is_isolated(smp_processor_id())) {
1361 		bh_lru_unlock();
1362 		return NULL;
1363 	}
1364 	for (i = 0; i < BH_LRU_SIZE; i++) {
1365 		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1366 
1367 		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1368 		    bh->b_size == size) {
1369 			if (i) {
1370 				while (i) {
1371 					__this_cpu_write(bh_lrus.bhs[i],
1372 						__this_cpu_read(bh_lrus.bhs[i - 1]));
1373 					i--;
1374 				}
1375 				__this_cpu_write(bh_lrus.bhs[0], bh);
1376 			}
1377 			get_bh(bh);
1378 			ret = bh;
1379 			break;
1380 		}
1381 	}
1382 	bh_lru_unlock();
1383 	return ret;
1384 }
1385 
1386 /*
1387  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1388  * it in the LRU and mark it as accessed.  If it is not present then return
1389  * NULL.
1390  */
1391 struct buffer_head *
1392 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1393 {
1394 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1395 
1396 	if (bh == NULL) {
1397 		/* __find_get_block_slow will mark the page accessed */
1398 		bh = __find_get_block_slow(bdev, block);
1399 		if (bh)
1400 			bh_lru_install(bh);
1401 	} else
1402 		touch_buffer(bh);
1403 
1404 	return bh;
1405 }
1406 EXPORT_SYMBOL(__find_get_block);
1407 
1408 /**
1409  * bdev_getblk - Get a buffer_head in a block device's buffer cache.
1410  * @bdev: The block device.
1411  * @block: The block number.
1412  * @size: The size of buffer_heads for this @bdev.
1413  * @gfp: The memory allocation flags to use.
1414  *
1415  * The returned buffer head has its reference count incremented, but is
1416  * not locked.  The caller should call brelse() when it has finished
1417  * with the buffer.  The buffer may not be uptodate.  If needed, the
1418  * caller can bring it uptodate either by reading it or overwriting it.
1419  *
1420  * Return: The buffer head, or NULL if memory could not be allocated.
1421  */
1422 struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
1423 		unsigned size, gfp_t gfp)
1424 {
1425 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1426 
1427 	might_alloc(gfp);
1428 	if (bh)
1429 		return bh;
1430 
1431 	return __getblk_slow(bdev, block, size, gfp);
1432 }
1433 EXPORT_SYMBOL(bdev_getblk);
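
/*
 * A usage sketch (editorial): since the buffer may not be uptodate, a
 * caller that intends to overwrite the whole block can skip reading it:
 *
 *	struct buffer_head *bh = bdev_getblk(bdev, block, size, GFP_NOFS);
 *	if (bh) {
 *		lock_buffer(bh);
 *		memcpy(bh->b_data, src, bh->b_size);	// src: one block of caller data (assumed)
 *		set_buffer_uptodate(bh);
 *		unlock_buffer(bh);
 *		mark_buffer_dirty(bh);
 *		brelse(bh);
 *	}
 */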
1434 
1435 /*
1436  * Do async read-ahead on a buffer.
1437  */
1438 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1439 {
1440 	struct buffer_head *bh = bdev_getblk(bdev, block, size,
1441 			GFP_NOWAIT | __GFP_MOVABLE);
1442 
1443 	if (likely(bh)) {
1444 		bh_readahead(bh, REQ_RAHEAD);
1445 		brelse(bh);
1446 	}
1447 }
1448 EXPORT_SYMBOL(__breadahead);
1449 
1450 /**
1451  * __bread_gfp() - Read a block.
1452  * @bdev: The block device to read from.
1453  * @block: Block number in units of block size.
1454  * @size: The block size of this device in bytes.
1455  * @gfp: Not page allocation flags; see below.
1456  *
1457  * You are not expected to call this function.  You should use one of
1458  * sb_bread(), sb_bread_unmovable() or __bread().
1459  *
1460  * Read a specified block, and return the buffer head that refers to it.
1461  * If @gfp is 0, the memory will be allocated using the block device's
1462  * default GFP flags.  If @gfp is __GFP_MOVABLE, the memory may be
1463  * allocated from a movable area.  Do not pass in a complete set of
1464  * GFP flags.
1465  *
1466  * The returned buffer head has its refcount increased.  The caller should
1467  * call brelse() when it has finished with the buffer.
1468  *
1469  * Context: May sleep waiting for I/O.
1470  * Return: NULL if the block was unreadable.
1471  */
1472 struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
1473 		unsigned size, gfp_t gfp)
1474 {
1475 	struct buffer_head *bh;
1476 
1477 	gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
1478 
1479 	/*
1480 	 * Prefer looping in the allocator rather than here, at least that
1481 	 * code knows what it's doing.
1482 	 */
1483 	gfp |= __GFP_NOFAIL;
1484 
1485 	bh = bdev_getblk(bdev, block, size, gfp);
1486 
1487 	if (likely(bh) && !buffer_uptodate(bh))
1488 		bh = __bread_slow(bh);
1489 	return bh;
1490 }
1491 EXPORT_SYMBOL(__bread_gfp);
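
/*
 * The usual entry point is sb_bread() (an editorial sketch): read one
 * metadata block, use its contents, then drop the reference:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 */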
1492 
1493 static void __invalidate_bh_lrus(struct bh_lru *b)
1494 {
1495 	int i;
1496 
1497 	for (i = 0; i < BH_LRU_SIZE; i++) {
1498 		brelse(b->bhs[i]);
1499 		b->bhs[i] = NULL;
1500 	}
1501 }
1502 /*
1503  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1504  * This doesn't race because it runs on each CPU either in irq context
1505  * or with preemption disabled.
1506  */
1507 static void invalidate_bh_lru(void *arg)
1508 {
1509 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1510 
1511 	__invalidate_bh_lrus(b);
1512 	put_cpu_var(bh_lrus);
1513 }
1514 
1515 bool has_bh_in_lru(int cpu, void *dummy)
1516 {
1517 	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1518 	int i;
1519 
1520 	for (i = 0; i < BH_LRU_SIZE; i++) {
1521 		if (b->bhs[i])
1522 			return true;
1523 	}
1524 
1525 	return false;
1526 }
1527 
1528 void invalidate_bh_lrus(void)
1529 {
1530 	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1531 }
1532 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1533 
1534 /*
1535  * It's called from workqueue context, so we need bh_lru_lock() to close
1536  * the race with preemption/irq.
1537  */
1538 void invalidate_bh_lrus_cpu(void)
1539 {
1540 	struct bh_lru *b;
1541 
1542 	bh_lru_lock();
1543 	b = this_cpu_ptr(&bh_lrus);
1544 	__invalidate_bh_lrus(b);
1545 	bh_lru_unlock();
1546 }
1547 
1548 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1549 		  unsigned long offset)
1550 {
1551 	bh->b_folio = folio;
1552 	BUG_ON(offset >= folio_size(folio));
1553 	if (folio_test_highmem(folio))
1554 		/*
1555 		 * This catches illegal uses and preserves the offset:
1556 		 */
1557 		bh->b_data = (char *)(0 + offset);
1558 	else
1559 		bh->b_data = folio_address(folio) + offset;
1560 }
1561 EXPORT_SYMBOL(folio_set_bh);
1562 
1563 /*
1564  * Called when truncating a buffer on a page completely.
1565  */
1566 
1567 /* Bits that are cleared during an invalidate */
1568 #define BUFFER_FLAGS_DISCARD \
1569 	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1570 	 1 << BH_Delay | 1 << BH_Unwritten)
1571 
1572 static void discard_buffer(struct buffer_head * bh)
1573 {
1574 	unsigned long b_state;
1575 
1576 	lock_buffer(bh);
1577 	clear_buffer_dirty(bh);
1578 	bh->b_bdev = NULL;
1579 	b_state = READ_ONCE(bh->b_state);
1580 	do {
1581 	} while (!try_cmpxchg(&bh->b_state, &b_state,
1582 			      b_state & ~BUFFER_FLAGS_DISCARD));
1583 	unlock_buffer(bh);
1584 }
1585 
1586 /**
1587  * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1588  * @folio: The folio which is affected.
1589  * @offset: start of the range to invalidate
1590  * @length: length of the range to invalidate
1591  *
1592  * block_invalidate_folio() is called when all or part of the folio has been
1593  * invalidated by a truncate operation.
1594  *
1595  * block_invalidate_folio() does not have to release all buffers, but it must
1596  * ensure that no dirty buffer is left outside @offset and that no I/O
1597  * is underway against any of the blocks which are outside the truncation
1598  * point.  Because the caller is about to free (and possibly reuse) those
1599  * blocks on-disk.
1600  */
1601 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1602 {
1603 	struct buffer_head *head, *bh, *next;
1604 	size_t curr_off = 0;
1605 	size_t stop = length + offset;
1606 
1607 	BUG_ON(!folio_test_locked(folio));
1608 
1609 	/*
1610 	 * Check for overflow
1611 	 */
1612 	BUG_ON(stop > folio_size(folio) || stop < length);
1613 
1614 	head = folio_buffers(folio);
1615 	if (!head)
1616 		return;
1617 
1618 	bh = head;
1619 	do {
1620 		size_t next_off = curr_off + bh->b_size;
1621 		next = bh->b_this_page;
1622 
1623 		/*
1624 		 * Are we still fully in range ?
1625 		 * Are we still fully in range?
1626 		if (next_off > stop)
1627 			goto out;
1628 
1629 		/*
1630 		 * is this block fully invalidated?
1631 		 */
1632 		if (offset <= curr_off)
1633 			discard_buffer(bh);
1634 		curr_off = next_off;
1635 		bh = next;
1636 	} while (bh != head);
1637 
1638 	/*
1639 	 * We release buffers only if the entire folio is being invalidated.
1640 	 * The get_block cached value has been unconditionally invalidated,
1641 	 * so real IO is not possible anymore.
1642 	 */
1643 	if (length == folio_size(folio))
1644 		filemap_release_folio(folio, 0);
1645 out:
1646 	folio_clear_mappedtodisk(folio);
1647 	return;
1648 }
1649 EXPORT_SYMBOL(block_invalidate_folio);
1650 
1651 /*
1652  * We attach and possibly dirty the buffers atomically wrt
1653  * block_dirty_folio() via i_private_lock.  try_to_free_buffers
1654  * is already excluded via the folio lock.
1655  */
1656 struct buffer_head *create_empty_buffers(struct folio *folio,
1657 		unsigned long blocksize, unsigned long b_state)
1658 {
1659 	struct buffer_head *bh, *head, *tail;
1660 	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT | __GFP_NOFAIL;
1661 
1662 	head = folio_alloc_buffers(folio, blocksize, gfp);
1663 	bh = head;
1664 	do {
1665 		bh->b_state |= b_state;
1666 		tail = bh;
1667 		bh = bh->b_this_page;
1668 	} while (bh);
1669 	tail->b_this_page = head;
1670 
1671 	spin_lock(&folio->mapping->i_private_lock);
1672 	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1673 		bh = head;
1674 		do {
1675 			if (folio_test_dirty(folio))
1676 				set_buffer_dirty(bh);
1677 			if (folio_test_uptodate(folio))
1678 				set_buffer_uptodate(bh);
1679 			bh = bh->b_this_page;
1680 		} while (bh != head);
1681 	}
1682 	folio_attach_private(folio, head);
1683 	spin_unlock(&folio->mapping->i_private_lock);
1684 
1685 	return head;
1686 }
1687 EXPORT_SYMBOL(create_empty_buffers);
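
/*
 * An editorial sketch of the common pattern (mirrored by
 * folio_create_buffers() below): look up a folio's buffers, creating
 * them if none are attached yet:
 *
 *	struct buffer_head *head = folio_buffers(folio);
 *	if (!head)
 *		head = create_empty_buffers(folio,
 *				1 << inode->i_blkbits, 0);
 */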
1688 
1689 /**
1690  * clean_bdev_aliases: clean a range of buffers in block device
1691  * @bdev: Block device to clean buffers in
1692  * @block: Start of a range of blocks to clean
1693  * @len: Number of blocks to clean
1694  *
1695  * We are taking a range of blocks for data and we don't want writeback of any
1696  * buffer-cache aliases from the moment this function returns until something
1697  * explicitly marks the buffer dirty again (hopefully that will not happen
1698  * until we free that block ;-) We don't even need to mark it not-uptodate -
1699  * nobody can expect anything from a newly allocated buffer anyway. We used
1700  * to use unmap_buffer() for such invalidation, but that was wrong. We
1701  * definitely don't want to mark the alias unmapped, for example - it would
1702  * confuse anyone who might pick it up with bread() afterwards...
1703  *
1704  * Also, note that bforget() doesn't lock the buffer.  So there can be
1705  * writeout I/O going on against recently-freed buffers.  We don't wait on that
1706  * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1707  * need to.  That happens here.
1708  */
1709 void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1710 {
1711 	struct address_space *bd_mapping = bdev->bd_mapping;
1712 	const int blkbits = bd_mapping->host->i_blkbits;
1713 	struct folio_batch fbatch;
1714 	pgoff_t index = ((loff_t)block << blkbits) / PAGE_SIZE;
1715 	pgoff_t end;
1716 	int i, count;
1717 	struct buffer_head *bh;
1718 	struct buffer_head *head;
1719 
1720 	end = ((loff_t)(block + len - 1) << blkbits) / PAGE_SIZE;
1721 	folio_batch_init(&fbatch);
1722 	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1723 		count = folio_batch_count(&fbatch);
1724 		for (i = 0; i < count; i++) {
1725 			struct folio *folio = fbatch.folios[i];
1726 
1727 			if (!folio_buffers(folio))
1728 				continue;
1729 			/*
1730 			 * We use folio lock instead of bd_mapping->i_private_lock
1731 			 * to pin buffers here since we can afford to sleep and
1732 			 * it scales better than a global spinlock.
1733 			 */
1734 			folio_lock(folio);
1735 			/* Recheck when the folio is locked which pins bhs */
1736 			head = folio_buffers(folio);
1737 			if (!head)
1738 				goto unlock_page;
1739 			bh = head;
1740 			do {
1741 				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1742 					goto next;
1743 				if (bh->b_blocknr >= block + len)
1744 					break;
1745 				clear_buffer_dirty(bh);
1746 				wait_on_buffer(bh);
1747 				clear_buffer_req(bh);
1748 next:
1749 				bh = bh->b_this_page;
1750 			} while (bh != head);
1751 unlock_page:
1752 			folio_unlock(folio);
1753 		}
1754 		folio_batch_release(&fbatch);
1755 		cond_resched();
1756 		/* End of range already reached? */
1757 		if (index > end || !index)
1758 			break;
1759 	}
1760 }
1761 EXPORT_SYMBOL(clean_bdev_aliases);
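
/*
 * Example (illustrative sketch, not part of this file): a filesystem that
 * has just allocated a run of blocks for file data could drop any stale
 * buffer-cache aliases for that range up front.  "sb", "pblk" and "len" are
 * assumed to come from the caller:
 *
 *	clean_bdev_aliases(sb->s_bdev, pblk, len);
 *
 * The single-block wrapper clean_bdev_bh_alias() in buffer_head.h is what
 * the generic write paths below use once get_block() hands back a buffer
 * marked new.
 */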
1762 
1763 static struct buffer_head *folio_create_buffers(struct folio *folio,
1764 						struct inode *inode,
1765 						unsigned int b_state)
1766 {
1767 	struct buffer_head *bh;
1768 
1769 	BUG_ON(!folio_test_locked(folio));
1770 
1771 	bh = folio_buffers(folio);
1772 	if (!bh)
1773 		bh = create_empty_buffers(folio,
1774 				1 << READ_ONCE(inode->i_blkbits), b_state);
1775 	return bh;
1776 }
1777 
1778 /*
1779  * NOTE! All mapped/uptodate combinations are valid:
1780  *
1781  *	Mapped	Uptodate	Meaning
1782  *
1783  *	No	No		"unknown" - must do get_block()
1784  *	No	Yes		"hole" - zero-filled
1785  *	Yes	No		"allocated" - allocated on disk, not read in
1786  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1787  *
1788  * "Dirty" is valid only with the last case (mapped+uptodate).
1789  */
1790 
1791 /*
1792  * While block_write_full_folio is writing back the dirty buffers under
1793  * the page lock, whoever dirtied the buffers may decide to clean them
1794  * again at any time.  We handle that by only looking at the buffer
1795  * state inside lock_buffer().
1796  *
1797  * If block_write_full_folio() is called for regular writeback
1798  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1799  * locked buffer.  This can only happen if someone has written the buffer
1800  * directly, with submit_bh().  At the address_space level PageWriteback
1801  * prevents this contention from occurring.
1802  *
1803  * If block_write_full_folio() is called with wbc->sync_mode ==
1804  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1805  * causes the writes to be flagged as synchronous writes.
1806  */
1807 int __block_write_full_folio(struct inode *inode, struct folio *folio,
1808 			get_block_t *get_block, struct writeback_control *wbc)
1809 {
1810 	int err;
1811 	sector_t block;
1812 	sector_t last_block;
1813 	struct buffer_head *bh, *head;
1814 	size_t blocksize;
1815 	int nr_underway = 0;
1816 	blk_opf_t write_flags = wbc_to_write_flags(wbc);
1817 
1818 	head = folio_create_buffers(folio, inode,
1819 				    (1 << BH_Dirty) | (1 << BH_Uptodate));
1820 
1821 	/*
1822 	 * Be very careful.  We have no exclusion from block_dirty_folio
1823 	 * here, and the (potentially unmapped) buffers may become dirty at
1824 	 * any time.  If a buffer becomes dirty here after we've inspected it
1825 	 * then we just miss that fact, and the folio stays dirty.
1826 	 *
1827 	 * Buffers outside i_size may be dirtied by block_dirty_folio;
1828 	 * handle that here by just cleaning them.
1829 	 */
1830 
1831 	bh = head;
1832 	blocksize = bh->b_size;
1833 
1834 	block = div_u64(folio_pos(folio), blocksize);
1835 	last_block = div_u64(i_size_read(inode) - 1, blocksize);
1836 
1837 	/*
1838 	 * Get all the dirty buffers mapped to disk addresses and
1839 	 * handle any aliases from the underlying blockdev's mapping.
1840 	 */
1841 	do {
1842 		if (block > last_block) {
1843 			/*
1844 			 * mapped buffers outside i_size will occur, because
1845 			 * this folio can be outside i_size when there is a
1846 			 * truncate in progress.
1847 			 */
1848 			/*
1849 			 * The buffer was zeroed by block_write_full_folio()
1850 			 */
1851 			clear_buffer_dirty(bh);
1852 			set_buffer_uptodate(bh);
1853 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1854 			   buffer_dirty(bh)) {
1855 			WARN_ON(bh->b_size != blocksize);
1856 			err = get_block(inode, block, bh, 1);
1857 			if (err)
1858 				goto recover;
1859 			clear_buffer_delay(bh);
1860 			if (buffer_new(bh)) {
1861 				/* blockdev mappings never come here */
1862 				clear_buffer_new(bh);
1863 				clean_bdev_bh_alias(bh);
1864 			}
1865 		}
1866 		bh = bh->b_this_page;
1867 		block++;
1868 	} while (bh != head);
1869 
1870 	do {
1871 		if (!buffer_mapped(bh))
1872 			continue;
1873 		/*
1874 		 * If it's a fully non-blocking write attempt and we cannot
1875 		 * lock the buffer then redirty the folio.  Note that this can
1876 		 * potentially cause a busy-wait loop from writeback threads
1877 		 * and kswapd activity, but those code paths have their own
1878 		 * higher-level throttling.
1879 		 */
1880 		if (wbc->sync_mode != WB_SYNC_NONE) {
1881 			lock_buffer(bh);
1882 		} else if (!trylock_buffer(bh)) {
1883 			folio_redirty_for_writepage(wbc, folio);
1884 			continue;
1885 		}
1886 		if (test_clear_buffer_dirty(bh)) {
1887 			mark_buffer_async_write_endio(bh,
1888 				end_buffer_async_write);
1889 		} else {
1890 			unlock_buffer(bh);
1891 		}
1892 	} while ((bh = bh->b_this_page) != head);
1893 
1894 	/*
1895 	 * The folio and its buffers are protected by the writeback flag,
1896 	 * so we can drop the bh refcounts early.
1897 	 */
1898 	BUG_ON(folio_test_writeback(folio));
1899 	folio_start_writeback(folio);
1900 
1901 	do {
1902 		struct buffer_head *next = bh->b_this_page;
1903 		if (buffer_async_write(bh)) {
1904 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1905 				      inode->i_write_hint, wbc);
1906 			nr_underway++;
1907 		}
1908 		bh = next;
1909 	} while (bh != head);
1910 	folio_unlock(folio);
1911 
1912 	err = 0;
1913 done:
1914 	if (nr_underway == 0) {
1915 		/*
1916 		 * The folio was marked dirty, but the buffers were
1917 		 * clean.  Someone wrote them back by hand with
1918 		 * write_dirty_buffer/submit_bh.  A rare case.
1919 		 */
1920 		folio_end_writeback(folio);
1921 
1922 		/*
1923 		 * The folio and buffer_heads can be released at any time from
1924 		 * here on.
1925 		 */
1926 	}
1927 	return err;
1928 
1929 recover:
1930 	/*
1931 	 * ENOSPC, or some other error.  We may already have added some
1932 	 * blocks to the file, so we need to write these out to avoid
1933 	 * exposing stale data.
1934 	 * The folio is currently locked and not marked for writeback
1935 	 */
1936 	bh = head;
1937 	/* Recovery: lock and submit the mapped buffers */
1938 	do {
1939 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1940 		    !buffer_delay(bh)) {
1941 			lock_buffer(bh);
1942 			mark_buffer_async_write_endio(bh,
1943 				end_buffer_async_write);
1944 		} else {
1945 			/*
1946 			 * The buffer may have been set dirty during
1947 			 * attachment to a dirty folio.
1948 			 */
1949 			clear_buffer_dirty(bh);
1950 		}
1951 	} while ((bh = bh->b_this_page) != head);
1952 	BUG_ON(folio_test_writeback(folio));
1953 	mapping_set_error(folio->mapping, err);
1954 	folio_start_writeback(folio);
1955 	do {
1956 		struct buffer_head *next = bh->b_this_page;
1957 		if (buffer_async_write(bh)) {
1958 			clear_buffer_dirty(bh);
1959 			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh,
1960 				      inode->i_write_hint, wbc);
1961 			nr_underway++;
1962 		}
1963 		bh = next;
1964 	} while (bh != head);
1965 	folio_unlock(folio);
1966 	goto done;
1967 }
1968 EXPORT_SYMBOL(__block_write_full_folio);
1969 
1970 /*
1971  * If a folio has any new buffers, zero them out here, and mark them uptodate
1972  * and dirty so they'll be written out (in order to prevent uninitialised
1973  * block data from leaking). And clear the new bit.
1974  */
1975 void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1976 {
1977 	size_t block_start, block_end;
1978 	struct buffer_head *head, *bh;
1979 
1980 	BUG_ON(!folio_test_locked(folio));
1981 	head = folio_buffers(folio);
1982 	if (!head)
1983 		return;
1984 
1985 	bh = head;
1986 	block_start = 0;
1987 	do {
1988 		block_end = block_start + bh->b_size;
1989 
1990 		if (buffer_new(bh)) {
1991 			if (block_end > from && block_start < to) {
1992 				if (!folio_test_uptodate(folio)) {
1993 					size_t start, xend;
1994 
1995 					start = max(from, block_start);
1996 					xend = min(to, block_end);
1997 
1998 					folio_zero_segment(folio, start, xend);
1999 					set_buffer_uptodate(bh);
2000 				}
2001 
2002 				clear_buffer_new(bh);
2003 				mark_buffer_dirty(bh);
2004 			}
2005 		}
2006 
2007 		block_start = block_end;
2008 		bh = bh->b_this_page;
2009 	} while (bh != head);
2010 }
2011 EXPORT_SYMBOL(folio_zero_new_buffers);
2012 
2013 static int
2014 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2015 		const struct iomap *iomap)
2016 {
2017 	loff_t offset = (loff_t)block << inode->i_blkbits;
2018 
2019 	bh->b_bdev = iomap->bdev;
2020 
2021 	/*
2022 	 * Block points to offset in file we need to map, iomap contains
2023 	 * the offset at which the map starts. If the map ends before the
2024 	 * current block, then do not map the buffer and let the caller
2025 	 * handle it.
2026 	 */
2027 	if (offset >= iomap->offset + iomap->length)
2028 		return -EIO;
2029 
2030 	switch (iomap->type) {
2031 	case IOMAP_HOLE:
2032 		/*
2033 		 * If the buffer is not up to date or beyond the current EOF,
2034 		 * we need to mark it as new to ensure sub-block zeroing is
2035 		 * executed if necessary.
2036 		 */
2037 		if (!buffer_uptodate(bh) ||
2038 		    (offset >= i_size_read(inode)))
2039 			set_buffer_new(bh);
2040 		return 0;
2041 	case IOMAP_DELALLOC:
2042 		if (!buffer_uptodate(bh) ||
2043 		    (offset >= i_size_read(inode)))
2044 			set_buffer_new(bh);
2045 		set_buffer_uptodate(bh);
2046 		set_buffer_mapped(bh);
2047 		set_buffer_delay(bh);
2048 		return 0;
2049 	case IOMAP_UNWRITTEN:
2050 		/*
2051 		 * For unwritten regions, we always need to ensure that regions
2052 		 * in the block we are not writing to are zeroed. Mark the
2053 		 * buffer as new to ensure this.
2054 		 */
2055 		set_buffer_new(bh);
2056 		set_buffer_unwritten(bh);
2057 		fallthrough;
2058 	case IOMAP_MAPPED:
2059 		if ((iomap->flags & IOMAP_F_NEW) ||
2060 		    offset >= i_size_read(inode)) {
2061 			/*
2062 			 * This can happen if truncating the block device races
2063 			 * with the check in the caller, as i_size updates on
2064 			 * block devices aren't synchronized by i_rwsem the
2065 			 * way they are for regular files.
2066 			 */
2067 			if (S_ISBLK(inode->i_mode))
2068 				return -EIO;
2069 			set_buffer_new(bh);
2070 		}
2071 		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2072 				inode->i_blkbits;
2073 		set_buffer_mapped(bh);
2074 		return 0;
2075 	default:
2076 		WARN_ON_ONCE(1);
2077 		return -EIO;
2078 	}
2079 }
2080 
2081 int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2082 		get_block_t *get_block, const struct iomap *iomap)
2083 {
2084 	size_t from = offset_in_folio(folio, pos);
2085 	size_t to = from + len;
2086 	struct inode *inode = folio->mapping->host;
2087 	size_t block_start, block_end;
2088 	sector_t block;
2089 	int err = 0;
2090 	size_t blocksize;
2091 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2092 
2093 	BUG_ON(!folio_test_locked(folio));
2094 	BUG_ON(to > folio_size(folio));
2095 	BUG_ON(from > to);
2096 
2097 	head = folio_create_buffers(folio, inode, 0);
2098 	blocksize = head->b_size;
2099 	block = div_u64(folio_pos(folio), blocksize);
2100 
2101 	for (bh = head, block_start = 0; bh != head || !block_start;
2102 	    block++, block_start=block_end, bh = bh->b_this_page) {
2103 		block_end = block_start + blocksize;
2104 		if (block_end <= from || block_start >= to) {
2105 			if (folio_test_uptodate(folio)) {
2106 				if (!buffer_uptodate(bh))
2107 					set_buffer_uptodate(bh);
2108 			}
2109 			continue;
2110 		}
2111 		if (buffer_new(bh))
2112 			clear_buffer_new(bh);
2113 		if (!buffer_mapped(bh)) {
2114 			WARN_ON(bh->b_size != blocksize);
2115 			if (get_block)
2116 				err = get_block(inode, block, bh, 1);
2117 			else
2118 				err = iomap_to_bh(inode, block, bh, iomap);
2119 			if (err)
2120 				break;
2121 
2122 			if (buffer_new(bh)) {
2123 				clean_bdev_bh_alias(bh);
2124 				if (folio_test_uptodate(folio)) {
2125 					clear_buffer_new(bh);
2126 					set_buffer_uptodate(bh);
2127 					mark_buffer_dirty(bh);
2128 					continue;
2129 				}
2130 				if (block_end > to || block_start < from)
2131 					folio_zero_segments(folio,
2132 						to, block_end,
2133 						block_start, from);
2134 				continue;
2135 			}
2136 		}
2137 		if (folio_test_uptodate(folio)) {
2138 			if (!buffer_uptodate(bh))
2139 				set_buffer_uptodate(bh);
2140 			continue;
2141 		}
2142 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2143 		    !buffer_unwritten(bh) &&
2144 		     (block_start < from || block_end > to)) {
2145 			bh_read_nowait(bh, 0);
2146 			*wait_bh++=bh;
2147 		}
2148 	}
2149 	/*
2150 	 * If we issued read requests - let them complete.
2151 	 */
2152 	while(wait_bh > wait) {
2153 		wait_on_buffer(*--wait_bh);
2154 		if (!buffer_uptodate(*wait_bh))
2155 			err = -EIO;
2156 	}
2157 	if (unlikely(err))
2158 		folio_zero_new_buffers(folio, from, to);
2159 	return err;
2160 }
2161 
2162 int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
2163 		get_block_t *get_block)
2164 {
2165 	return __block_write_begin_int(folio, pos, len, get_block, NULL);
2166 }
2167 EXPORT_SYMBOL(__block_write_begin);
2168 
2169 void block_commit_write(struct folio *folio, size_t from, size_t to)
2170 {
2171 	size_t block_start, block_end;
2172 	bool partial = false;
2173 	unsigned blocksize;
2174 	struct buffer_head *bh, *head;
2175 
2176 	bh = head = folio_buffers(folio);
2177 	if (!bh)
2178 		return;
2179 	blocksize = bh->b_size;
2180 
2181 	block_start = 0;
2182 	do {
2183 		block_end = block_start + blocksize;
2184 		if (block_end <= from || block_start >= to) {
2185 			if (!buffer_uptodate(bh))
2186 				partial = true;
2187 		} else {
2188 			set_buffer_uptodate(bh);
2189 			mark_buffer_dirty(bh);
2190 		}
2191 		if (buffer_new(bh))
2192 			clear_buffer_new(bh);
2193 
2194 		block_start = block_end;
2195 		bh = bh->b_this_page;
2196 	} while (bh != head);
2197 
2198 	/*
2199 	 * If this is a partial write which happened to make all buffers
2200 	 * uptodate then we can optimize away a bogus read_folio() for
2201 	 * the next read(). Here we 'discover' whether the folio went
2202 	 * uptodate as a result of this (potentially partial) write.
2203 	 */
2204 	if (!partial)
2205 		folio_mark_uptodate(folio);
2206 }
2207 EXPORT_SYMBOL(block_commit_write);
2208 
2209 /*
2210  * block_write_begin takes care of the basic task of block allocation and
2211  * bringing partial write blocks uptodate first.
2212  *
2213  * The filesystem needs to handle block truncation upon failure.
2214  */
2215 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2216 		struct folio **foliop, get_block_t *get_block)
2217 {
2218 	pgoff_t index = pos >> PAGE_SHIFT;
2219 	struct folio *folio;
2220 	int status;
2221 
2222 	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
2223 			mapping_gfp_mask(mapping));
2224 	if (IS_ERR(folio))
2225 		return PTR_ERR(folio);
2226 
2227 	status = __block_write_begin_int(folio, pos, len, get_block, NULL);
2228 	if (unlikely(status)) {
2229 		folio_unlock(folio);
2230 		folio_put(folio);
2231 		folio = NULL;
2232 	}
2233 
2234 	*foliop = folio;
2235 	return status;
2236 }
2237 EXPORT_SYMBOL(block_write_begin);
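
/*
 * Example (illustrative sketch): a minimal ->write_begin built on
 * block_write_begin(), in the style of simple block-based filesystems.
 * foofs_get_block and foofs_write_failed (which truncates blocks
 * instantiated beyond i_size, handling the failure case noted above) are
 * hypothetical.
 *
 *	static int foofs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		int ret;
 *
 *		ret = block_write_begin(mapping, pos, len, foliop,
 *					foofs_get_block);
 *		if (ret)
 *			foofs_write_failed(mapping, pos + len);
 *		return ret;
 *	}
 */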
2238 
2239 int block_write_end(struct file *file, struct address_space *mapping,
2240 			loff_t pos, unsigned len, unsigned copied,
2241 			struct folio *folio, void *fsdata)
2242 {
2243 	size_t start = pos - folio_pos(folio);
2244 
2245 	if (unlikely(copied < len)) {
2246 		/*
2247 		 * The buffers that were written will now be uptodate, so
2248 		 * we don't have to worry about a read_folio reading them
2249 		 * and overwriting a partial write. However if we have
2250 		 * encountered a short write and only partially written
2251 		 * into a buffer, it will not be marked uptodate, so a
2252 		 * read_folio might come in and destroy our partial write.
2253 		 *
2254 		 * Do the simplest thing, and just treat any short write to a
2255 		 * non uptodate folio as a zero-length write, and force the
2256 		 * caller to redo the whole thing.
2257 		 */
2258 		if (!folio_test_uptodate(folio))
2259 			copied = 0;
2260 
2261 		folio_zero_new_buffers(folio, start+copied, start+len);
2262 	}
2263 	flush_dcache_folio(folio);
2264 
2265 	/* This could be a short (even 0-length) commit */
2266 	block_commit_write(folio, start, start + copied);
2267 
2268 	return copied;
2269 }
2270 EXPORT_SYMBOL(block_write_end);
2271 
2272 int generic_write_end(struct file *file, struct address_space *mapping,
2273 			loff_t pos, unsigned len, unsigned copied,
2274 			struct folio *folio, void *fsdata)
2275 {
2276 	struct inode *inode = mapping->host;
2277 	loff_t old_size = inode->i_size;
2278 	bool i_size_changed = false;
2279 
2280 	copied = block_write_end(file, mapping, pos, len, copied, folio, fsdata);
2281 
2282 	/*
2283 	 * No need to use i_size_read() here, the i_size cannot change under us
2284 	 * because we hold i_rwsem.
2285 	 *
2286 	 * But it's important to update i_size while still holding folio lock:
2287 	 * page writeout could otherwise come in and zero beyond i_size.
2288 	 */
2289 	if (pos + copied > inode->i_size) {
2290 		i_size_write(inode, pos + copied);
2291 		i_size_changed = true;
2292 	}
2293 
2294 	folio_unlock(folio);
2295 	folio_put(folio);
2296 
2297 	if (old_size < pos)
2298 		pagecache_isize_extended(inode, old_size, pos);
2299 	/*
2300 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2301 	 * makes the holding time of page lock longer. Second, it forces lock
2302 	 * ordering of page lock and transaction start for journaling
2303 	 * filesystems.
2304 	 */
2305 	if (i_size_changed)
2306 		mark_inode_dirty(inode);
2307 	return copied;
2308 }
2309 EXPORT_SYMBOL(generic_write_end);
2310 
2311 /*
2312  * block_is_partially_uptodate checks whether buffers within a folio are
2313  * uptodate or not.
2314  *
2315  * Returns true if all buffers which correspond to the specified part
2316  * of the folio are uptodate.
2317  */
2318 bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2319 {
2320 	unsigned block_start, block_end, blocksize;
2321 	unsigned to;
2322 	struct buffer_head *bh, *head;
2323 	bool ret = true;
2324 
2325 	head = folio_buffers(folio);
2326 	if (!head)
2327 		return false;
2328 	blocksize = head->b_size;
2329 	to = min_t(unsigned, folio_size(folio) - from, count);
2330 	to = from + to;
2331 	if (from < blocksize && to > folio_size(folio) - blocksize)
2332 		return false;
2333 
2334 	bh = head;
2335 	block_start = 0;
2336 	do {
2337 		block_end = block_start + blocksize;
2338 		if (block_end > from && block_start < to) {
2339 			if (!buffer_uptodate(bh)) {
2340 				ret = false;
2341 				break;
2342 			}
2343 			if (block_end >= to)
2344 				break;
2345 		}
2346 		block_start = block_end;
2347 		bh = bh->b_this_page;
2348 	} while (bh != head);
2349 
2350 	return ret;
2351 }
2352 EXPORT_SYMBOL(block_is_partially_uptodate);
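
/*
 * Example (illustrative sketch): where these helpers typically land in a
 * block-based filesystem's address_space_operations.  The foofs_* entries
 * are hypothetical wrappers around the helpers in this file; the rest are
 * generic helpers used directly.
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= foofs_read_folio,
 *		.writepages		= foofs_writepages,
 *		.write_begin		= foofs_write_begin,
 *		.write_end		= generic_write_end,
 *		.bmap			= foofs_bmap,
 *		.is_partially_uptodate	= block_is_partially_uptodate,
 *	};
 */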
2353 
2354 /*
2355  * Generic "read_folio" function for block devices that have the normal
2356  * get_block functionality. This is most of the block device filesystems.
2357  * Reads the folio asynchronously --- the unlock_buffer() and
2358  * set/clear_buffer_uptodate() functions propagate buffer state into the
2359  * folio once IO has completed.
2360  */
2361 int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2362 {
2363 	struct inode *inode = folio->mapping->host;
2364 	sector_t iblock, lblock;
2365 	struct buffer_head *bh, *head, *prev = NULL;
2366 	size_t blocksize;
2367 	int fully_mapped = 1;
2368 	bool page_error = false;
2369 	loff_t limit = i_size_read(inode);
2370 
2371 	/* This is needed for ext4. */
2372 	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2373 		limit = inode->i_sb->s_maxbytes;
2374 
2375 	head = folio_create_buffers(folio, inode, 0);
2376 	blocksize = head->b_size;
2377 
2378 	iblock = div_u64(folio_pos(folio), blocksize);
2379 	lblock = div_u64(limit + blocksize - 1, blocksize);
2380 	bh = head;
2381 
2382 	do {
2383 		if (buffer_uptodate(bh))
2384 			continue;
2385 
2386 		if (!buffer_mapped(bh)) {
2387 			int err = 0;
2388 
2389 			fully_mapped = 0;
2390 			if (iblock < lblock) {
2391 				WARN_ON(bh->b_size != blocksize);
2392 				err = get_block(inode, iblock, bh, 0);
2393 				if (err)
2394 					page_error = true;
2395 			}
2396 			if (!buffer_mapped(bh)) {
2397 				folio_zero_range(folio, bh_offset(bh),
2398 						blocksize);
2399 				if (!err)
2400 					set_buffer_uptodate(bh);
2401 				continue;
2402 			}
2403 			/*
2404 			 * get_block() might have updated the buffer
2405 			 * synchronously
2406 			 */
2407 			if (buffer_uptodate(bh))
2408 				continue;
2409 		}
2410 
2411 		lock_buffer(bh);
2412 		if (buffer_uptodate(bh)) {
2413 			unlock_buffer(bh);
2414 			continue;
2415 		}
2416 
2417 		mark_buffer_async_read(bh);
2418 		if (prev)
2419 			submit_bh(REQ_OP_READ, prev);
2420 		prev = bh;
2421 	} while (iblock++, (bh = bh->b_this_page) != head);
2422 
2423 	if (fully_mapped)
2424 		folio_set_mappedtodisk(folio);
2425 
2426 	/*
2427 	 * All buffers are uptodate or get_block() returned an error
2428 	 * when trying to map them - we must finish the read because
2429 	 * end_buffer_async_read() will never be called on any buffer
2430 	 * in this folio.
2431 	 */
2432 	if (prev)
2433 		submit_bh(REQ_OP_READ, prev);
2434 	else
2435 		folio_end_read(folio, !page_error);
2436 
2437 	return 0;
2438 }
2439 EXPORT_SYMBOL(block_read_full_folio);
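
/*
 * Example (illustrative sketch): a filesystem's ->read_folio is typically a
 * thin wrapper that supplies its own get_block routine; foofs_get_block is
 * hypothetical.
 *
 *	static int foofs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, foofs_get_block);
 *	}
 */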
2440 
2441 /* utility function for filesystems that need to do work on expanding
2442  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2443  * deal with the hole.
2444  */
2445 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2446 {
2447 	struct address_space *mapping = inode->i_mapping;
2448 	const struct address_space_operations *aops = mapping->a_ops;
2449 	struct folio *folio;
2450 	void *fsdata = NULL;
2451 	int err;
2452 
2453 	err = inode_newsize_ok(inode, size);
2454 	if (err)
2455 		goto out;
2456 
2457 	err = aops->write_begin(NULL, mapping, size, 0, &folio, &fsdata);
2458 	if (err)
2459 		goto out;
2460 
2461 	err = aops->write_end(NULL, mapping, size, 0, 0, folio, fsdata);
2462 	BUG_ON(err > 0);
2463 
2464 out:
2465 	return err;
2466 }
2467 EXPORT_SYMBOL(generic_cont_expand_simple);
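
/*
 * Example (illustrative sketch): a hole-less filesystem might call this from
 * its ->setattr when a truncate grows the file, before committing the new
 * size.  "attr" is the struct iattr passed to ->setattr.
 *
 *	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) {
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 *		if (err)
 *			return err;
 *	}
 */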
2468 
2469 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2470 			    loff_t pos, loff_t *bytes)
2471 {
2472 	struct inode *inode = mapping->host;
2473 	const struct address_space_operations *aops = mapping->a_ops;
2474 	unsigned int blocksize = i_blocksize(inode);
2475 	struct folio *folio;
2476 	void *fsdata = NULL;
2477 	pgoff_t index, curidx;
2478 	loff_t curpos;
2479 	unsigned zerofrom, offset, len;
2480 	int err = 0;
2481 
2482 	index = pos >> PAGE_SHIFT;
2483 	offset = pos & ~PAGE_MASK;
2484 
2485 	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2486 		zerofrom = curpos & ~PAGE_MASK;
2487 		if (zerofrom & (blocksize-1)) {
2488 			*bytes |= (blocksize-1);
2489 			(*bytes)++;
2490 		}
2491 		len = PAGE_SIZE - zerofrom;
2492 
2493 		err = aops->write_begin(file, mapping, curpos, len,
2494 					    &folio, &fsdata);
2495 		if (err)
2496 			goto out;
2497 		folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2498 		err = aops->write_end(file, mapping, curpos, len, len,
2499 						folio, fsdata);
2500 		if (err < 0)
2501 			goto out;
2502 		BUG_ON(err != len);
2503 		err = 0;
2504 
2505 		balance_dirty_pages_ratelimited(mapping);
2506 
2507 		if (fatal_signal_pending(current)) {
2508 			err = -EINTR;
2509 			goto out;
2510 		}
2511 	}
2512 
2513 	/* page covers the boundary, find the boundary offset */
2514 	if (index == curidx) {
2515 		zerofrom = curpos & ~PAGE_MASK;
2516 		/* nothing to zero if the write starts at or before the current end */
2517 		if (offset <= zerofrom) {
2518 			goto out;
2519 		}
2520 		if (zerofrom & (blocksize-1)) {
2521 			*bytes |= (blocksize-1);
2522 			(*bytes)++;
2523 		}
2524 		len = offset - zerofrom;
2525 
2526 		err = aops->write_begin(file, mapping, curpos, len,
2527 					    &folio, &fsdata);
2528 		if (err)
2529 			goto out;
2530 		folio_zero_range(folio, offset_in_folio(folio, curpos), len);
2531 		err = aops->write_end(file, mapping, curpos, len, len,
2532 						folio, fsdata);
2533 		if (err < 0)
2534 			goto out;
2535 		BUG_ON(err != len);
2536 		err = 0;
2537 	}
2538 out:
2539 	return err;
2540 }
2541 
2542 /*
2543  * For moronic filesystems that do not allow holes in a file.
2544  * We may have to extend the file.
2545  */
2546 int cont_write_begin(struct file *file, struct address_space *mapping,
2547 			loff_t pos, unsigned len,
2548 			struct folio **foliop, void **fsdata,
2549 			get_block_t *get_block, loff_t *bytes)
2550 {
2551 	struct inode *inode = mapping->host;
2552 	unsigned int blocksize = i_blocksize(inode);
2553 	unsigned int zerofrom;
2554 	int err;
2555 
2556 	err = cont_expand_zero(file, mapping, pos, bytes);
2557 	if (err)
2558 		return err;
2559 
2560 	zerofrom = *bytes & ~PAGE_MASK;
2561 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2562 		*bytes |= (blocksize-1);
2563 		(*bytes)++;
2564 	}
2565 
2566 	return block_write_begin(mapping, pos, len, foliop, get_block);
2567 }
2568 EXPORT_SYMBOL(cont_write_begin);
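
/*
 * Example (illustrative sketch): ->write_begin for such a filesystem,
 * passing a pointer to its per-inode "bytes allocated so far" counter.
 * CONTFS_I() and its mmu_private field, and contfs_get_block, are
 * hypothetical.
 *
 *	static int contfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct folio **foliop, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, foliop,
 *					fsdata, contfs_get_block,
 *					&CONTFS_I(mapping->host)->mmu_private);
 *	}
 */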
2569 
2570 /*
2571  * block_page_mkwrite() is not allowed to change the file size as it gets
2572  * called from a page fault handler when a page is first dirtied. Hence we must
2573  * be careful to check for EOF conditions here. We set the page up correctly
2574  * for a written page which means we get ENOSPC checking when writing into
2575  * holes and correct delalloc and unwritten extent mapping on filesystems that
2576  * support these features.
2577  *
2578  * We are not allowed to take the i_rwsem here so we have to play games to
2579  * protect against truncate races as the page could now be beyond EOF.  Because
2580  * truncate writes the inode size before removing pages, once we have the
2581  * page lock we can determine safely if the page is beyond EOF. If it is not
2582  * beyond EOF, then the page is guaranteed safe against truncation until we
2583  * unlock the page.
2584  *
2585  * Direct callers of this function should protect against filesystem freezing
2586  * using sb_start_pagefault() - sb_end_pagefault() functions.
2587  */
2588 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2589 			 get_block_t get_block)
2590 {
2591 	struct folio *folio = page_folio(vmf->page);
2592 	struct inode *inode = file_inode(vma->vm_file);
2593 	unsigned long end;
2594 	loff_t size;
2595 	int ret;
2596 
2597 	folio_lock(folio);
2598 	size = i_size_read(inode);
2599 	if ((folio->mapping != inode->i_mapping) ||
2600 	    (folio_pos(folio) >= size)) {
2601 		/* We overload EFAULT to mean page got truncated */
2602 		ret = -EFAULT;
2603 		goto out_unlock;
2604 	}
2605 
2606 	end = folio_size(folio);
2607 	/* folio is wholly or partially inside EOF */
2608 	if (folio_pos(folio) + end > size)
2609 		end = size - folio_pos(folio);
2610 
2611 	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2612 	if (unlikely(ret))
2613 		goto out_unlock;
2614 
2615 	block_commit_write(folio, 0, end);
2616 
2617 	folio_mark_dirty(folio);
2618 	folio_wait_stable(folio);
2619 	return 0;
2620 out_unlock:
2621 	folio_unlock(folio);
2622 	return ret;
2623 }
2624 EXPORT_SYMBOL(block_page_mkwrite);
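
/*
 * Example (illustrative sketch): a direct caller wraps this in the freeze
 * protection mentioned above and converts the result for the fault handler;
 * the foofs names are hypothetical.
 *
 *	static vm_fault_t foofs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int err;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		err = block_page_mkwrite(vmf->vma, vmf, foofs_get_block);
 *		sb_end_pagefault(inode->i_sb);
 *		return block_page_mkwrite_return(err);
 *	}
 */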
2625 
2626 int block_truncate_page(struct address_space *mapping,
2627 			loff_t from, get_block_t *get_block)
2628 {
2629 	pgoff_t index = from >> PAGE_SHIFT;
2630 	unsigned blocksize;
2631 	sector_t iblock;
2632 	size_t offset, length, pos;
2633 	struct inode *inode = mapping->host;
2634 	struct folio *folio;
2635 	struct buffer_head *bh;
2636 	int err = 0;
2637 
2638 	blocksize = i_blocksize(inode);
2639 	length = from & (blocksize - 1);
2640 
2641 	/* Block boundary? Nothing to do */
2642 	if (!length)
2643 		return 0;
2644 
2645 	length = blocksize - length;
2646 	iblock = ((loff_t)index * PAGE_SIZE) >> inode->i_blkbits;
2647 
2648 	folio = filemap_grab_folio(mapping, index);
2649 	if (IS_ERR(folio))
2650 		return PTR_ERR(folio);
2651 
2652 	bh = folio_buffers(folio);
2653 	if (!bh)
2654 		bh = create_empty_buffers(folio, blocksize, 0);
2655 
2656 	/* Find the buffer that contains "offset" */
2657 	offset = offset_in_folio(folio, from);
2658 	pos = blocksize;
2659 	while (offset >= pos) {
2660 		bh = bh->b_this_page;
2661 		iblock++;
2662 		pos += blocksize;
2663 	}
2664 
2665 	if (!buffer_mapped(bh)) {
2666 		WARN_ON(bh->b_size != blocksize);
2667 		err = get_block(inode, iblock, bh, 0);
2668 		if (err)
2669 			goto unlock;
2670 		/* unmapped? It's a hole - nothing to do */
2671 		if (!buffer_mapped(bh))
2672 			goto unlock;
2673 	}
2674 
2675 	/* Ok, it's mapped. Make sure it's up-to-date */
2676 	if (folio_test_uptodate(folio))
2677 		set_buffer_uptodate(bh);
2678 
2679 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2680 		err = bh_read(bh, 0);
2681 		/* Uhhuh. Read error. Complain and punt. */
2682 		if (err < 0)
2683 			goto unlock;
2684 	}
2685 
2686 	folio_zero_range(folio, offset, length);
2687 	mark_buffer_dirty(bh);
2688 
2689 unlock:
2690 	folio_unlock(folio);
2691 	folio_put(folio);
2692 
2693 	return err;
2694 }
2695 EXPORT_SYMBOL(block_truncate_page);
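
/*
 * Example (illustrative sketch): a shrinking truncate in a simple
 * block-based filesystem zeroes the tail of the last remaining block before
 * updating i_size and releasing the on-disk blocks; the foofs helpers are
 * hypothetical.
 *
 *	static int foofs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int err;
 *
 *		err = block_truncate_page(inode->i_mapping, newsize,
 *					  foofs_get_block);
 *		if (err)
 *			return err;
 *		truncate_setsize(inode, newsize);
 *		foofs_release_blocks(inode, newsize);
 *		return 0;
 *	}
 */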
2696 
2697 /*
2698  * The generic folio writeback function for buffer-backed address_spaces
2699  */
2700 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
2701 		void *get_block)
2702 {
2703 	struct inode * const inode = folio->mapping->host;
2704 	loff_t i_size = i_size_read(inode);
2705 
2706 	/* Is the folio fully inside i_size? */
2707 	if (folio_pos(folio) + folio_size(folio) <= i_size)
2708 		return __block_write_full_folio(inode, folio, get_block, wbc);
2709 
2710 	/* Is the folio fully outside i_size? (truncate in progress) */
2711 	if (folio_pos(folio) >= i_size) {
2712 		folio_unlock(folio);
2713 		return 0; /* don't care */
2714 	}
2715 
2716 	/*
2717 	 * The folio straddles i_size.  It must be zeroed out on each and every
2718 	 * writepage invocation because it may be mmapped.  "A file is mapped
2719 	 * in multiples of the page size.  For a file that is not a multiple of
2720 	 * the page size, the remaining memory is zeroed when mapped, and
2721 	 * writes to that region are not written out to the file."
2722 	 */
2723 	folio_zero_segment(folio, offset_in_folio(folio, i_size),
2724 			folio_size(folio));
2725 	return __block_write_full_folio(inode, folio, get_block, wbc);
2726 }
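
/*
 * Example (illustrative sketch): block_write_full_folio() has the
 * writepage_t signature, so a filesystem's ->writepages can hand it to
 * write_cache_pages() with the get_block routine as the private data;
 * foofs_get_block is hypothetical.
 *
 *	static int foofs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return write_cache_pages(mapping, wbc, block_write_full_folio,
 *					 foofs_get_block);
 *	}
 */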
2727 
2728 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2729 			    get_block_t *get_block)
2730 {
2731 	struct inode *inode = mapping->host;
2732 	struct buffer_head tmp = {
2733 		.b_size = i_blocksize(inode),
2734 	};
2735 
2736 	get_block(inode, block, &tmp, 0);
2737 	return tmp.b_blocknr;
2738 }
2739 EXPORT_SYMBOL(generic_block_bmap);
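
/*
 * Example (illustrative sketch): the usual ->bmap wrapper; foofs_get_block
 * is hypothetical.
 *
 *	static sector_t foofs_bmap(struct address_space *mapping,
 *			sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, foofs_get_block);
 *	}
 */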
2740 
2741 static void end_bio_bh_io_sync(struct bio *bio)
2742 {
2743 	struct buffer_head *bh = bio->bi_private;
2744 
2745 	if (unlikely(bio_flagged(bio, BIO_QUIET)))
2746 		set_bit(BH_Quiet, &bh->b_state);
2747 
2748 	bh->b_end_io(bh, !bio->bi_status);
2749 	bio_put(bio);
2750 }
2751 
2752 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2753 			  enum rw_hint write_hint,
2754 			  struct writeback_control *wbc)
2755 {
2756 	const enum req_op op = opf & REQ_OP_MASK;
2757 	struct bio *bio;
2758 
2759 	BUG_ON(!buffer_locked(bh));
2760 	BUG_ON(!buffer_mapped(bh));
2761 	BUG_ON(!bh->b_end_io);
2762 	BUG_ON(buffer_delay(bh));
2763 	BUG_ON(buffer_unwritten(bh));
2764 
2765 	/*
2766 	 * Only clear out a write error when rewriting
2767 	 */
2768 	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2769 		clear_buffer_write_io_error(bh);
2770 
2771 	if (buffer_meta(bh))
2772 		opf |= REQ_META;
2773 	if (buffer_prio(bh))
2774 		opf |= REQ_PRIO;
2775 
2776 	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2777 
2778 	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2779 
2780 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2781 	bio->bi_write_hint = write_hint;
2782 
2783 	bio_add_folio_nofail(bio, bh->b_folio, bh->b_size, bh_offset(bh));
2784 
2785 	bio->bi_end_io = end_bio_bh_io_sync;
2786 	bio->bi_private = bh;
2787 
2788 	/* Take care of bh's that straddle the end of the device */
2789 	guard_bio_eod(bio);
2790 
2791 	if (wbc) {
2792 		wbc_init_bio(wbc, bio);
2793 		wbc_account_cgroup_owner(wbc, bh->b_folio, bh->b_size);
2794 	}
2795 
2796 	submit_bio(bio);
2797 }
2798 
2799 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2800 {
2801 	submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);
2802 }
2803 EXPORT_SYMBOL(submit_bh);
2804 
2805 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2806 {
2807 	lock_buffer(bh);
2808 	if (!test_clear_buffer_dirty(bh)) {
2809 		unlock_buffer(bh);
2810 		return;
2811 	}
2812 	bh->b_end_io = end_buffer_write_sync;
2813 	get_bh(bh);
2814 	submit_bh(REQ_OP_WRITE | op_flags, bh);
2815 }
2816 EXPORT_SYMBOL(write_dirty_buffer);
2817 
2818 /*
2819  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2820  * and then start new I/O and then wait upon it.  The caller must have a ref on
2821  * the buffer_head.
2822  */
2823 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2824 {
2825 	WARN_ON(atomic_read(&bh->b_count) < 1);
2826 	lock_buffer(bh);
2827 	if (test_clear_buffer_dirty(bh)) {
2828 		/*
2829 		 * The bh should be mapped, but it might not be if the
2830 		 * device was hot-removed. Not much we can do but fail the I/O.
2831 		 */
2832 		if (!buffer_mapped(bh)) {
2833 			unlock_buffer(bh);
2834 			return -EIO;
2835 		}
2836 
2837 		get_bh(bh);
2838 		bh->b_end_io = end_buffer_write_sync;
2839 		submit_bh(REQ_OP_WRITE | op_flags, bh);
2840 		wait_on_buffer(bh);
2841 		if (!buffer_uptodate(bh))
2842 			return -EIO;
2843 	} else {
2844 		unlock_buffer(bh);
2845 	}
2846 	return 0;
2847 }
2848 EXPORT_SYMBOL(__sync_dirty_buffer);
2849 
2850 int sync_dirty_buffer(struct buffer_head *bh)
2851 {
2852 	return __sync_dirty_buffer(bh, REQ_SYNC);
2853 }
2854 EXPORT_SYMBOL(sync_dirty_buffer);
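
/*
 * Example (illustrative sketch): the classic metadata-update pattern built
 * on these helpers - read the block, modify it, mark it dirty, and only pay
 * for a synchronous writeout when asked to.  "sb", "block", "offset", "src",
 * "len" and "do_sync" are assumed to come from the caller.
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *	int err = 0;
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(bh->b_data + offset, src, len);
 *	mark_buffer_dirty(bh);
 *	if (do_sync)
 *		err = sync_dirty_buffer(bh);
 *	brelse(bh);
 *	return err;
 */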
2855 
2856 static inline int buffer_busy(struct buffer_head *bh)
2857 {
2858 	return atomic_read(&bh->b_count) |
2859 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2860 }
2861 
2862 static bool
2863 drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2864 {
2865 	struct buffer_head *head = folio_buffers(folio);
2866 	struct buffer_head *bh;
2867 
2868 	bh = head;
2869 	do {
2870 		if (buffer_busy(bh))
2871 			goto failed;
2872 		bh = bh->b_this_page;
2873 	} while (bh != head);
2874 
2875 	do {
2876 		struct buffer_head *next = bh->b_this_page;
2877 
2878 		if (bh->b_assoc_map)
2879 			__remove_assoc_queue(bh);
2880 		bh = next;
2881 	} while (bh != head);
2882 	*buffers_to_free = head;
2883 	folio_detach_private(folio);
2884 	return true;
2885 failed:
2886 	return false;
2887 }
2888 
2889 /**
2890  * try_to_free_buffers - Release buffers attached to this folio.
2891  * @folio: The folio.
2892  *
2893  * If any buffers are in use (dirty, under writeback, elevated refcount),
2894  * no buffers will be freed.
2895  *
2896  * If the folio is dirty but all the buffers are clean then we need to
2897  * be sure to mark the folio clean as well.  This is because the folio
2898  * may be against a block device, and a later reattachment of buffers
2899  * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2900  * filesystem data on the same device.
2901  *
2902  * The same applies to regular filesystem folios: if all the buffers are
2903  * clean then we set the folio clean and proceed.  To do that, we require
2904  * total exclusion from block_dirty_folio().  That is obtained with
2905  * i_private_lock.
2906  *
2907  * Exclusion against try_to_free_buffers may be obtained by either
2908  * locking the folio or by holding its mapping's i_private_lock.
2909  *
2910  * Context: Process context.  @folio must be locked.  Will not sleep.
2911  * Return: true if all buffers attached to this folio were freed.
2912  */
2913 bool try_to_free_buffers(struct folio *folio)
2914 {
2915 	struct address_space * const mapping = folio->mapping;
2916 	struct buffer_head *buffers_to_free = NULL;
2917 	bool ret = false;
2918 
2919 	BUG_ON(!folio_test_locked(folio));
2920 	if (folio_test_writeback(folio))
2921 		return false;
2922 
2923 	if (mapping == NULL) {		/* can this still happen? */
2924 		ret = drop_buffers(folio, &buffers_to_free);
2925 		goto out;
2926 	}
2927 
2928 	spin_lock(&mapping->i_private_lock);
2929 	ret = drop_buffers(folio, &buffers_to_free);
2930 
2931 	/*
2932 	 * If the filesystem writes its buffers by hand (eg ext3)
2933 	 * then we can have clean buffers against a dirty folio.  We
2934 	 * clean the folio here; otherwise the VM will never notice
2935 	 * that the filesystem did any IO at all.
2936 	 *
2937 	 * Also, during truncate, discard_buffer will have marked all
2938 	 * the folio's buffers clean.  We discover that here and clean
2939 	 * the folio also.
2940 	 *
2941 	 * i_private_lock must be held over this entire operation in order
2942 	 * to synchronise against block_dirty_folio and prevent the
2943 	 * dirty bit from being lost.
2944 	 */
2945 	if (ret)
2946 		folio_cancel_dirty(folio);
2947 	spin_unlock(&mapping->i_private_lock);
2948 out:
2949 	if (buffers_to_free) {
2950 		struct buffer_head *bh = buffers_to_free;
2951 
2952 		do {
2953 			struct buffer_head *next = bh->b_this_page;
2954 			free_buffer_head(bh);
2955 			bh = next;
2956 		} while (bh != buffers_to_free);
2957 	}
2958 	return ret;
2959 }
2960 EXPORT_SYMBOL(try_to_free_buffers);
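
/*
 * Example (illustrative sketch): a filesystem with no extra per-folio state
 * can leave ->release_folio unset and the VFS falls back to calling
 * try_to_free_buffers() itself; one that does keep private state tears it
 * down first.  foofs_forget_folio_state() is hypothetical.
 *
 *	static bool foofs_release_folio(struct folio *folio, gfp_t gfp)
 *	{
 *		if (!foofs_forget_folio_state(folio))
 *			return false;
 *		return try_to_free_buffers(folio);
 *	}
 */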
2961 
2962 /*
2963  * Buffer-head allocation
2964  */
2965 static struct kmem_cache *bh_cachep __ro_after_init;
2966 
2967 /*
2968  * Once the number of bh's in the machine exceeds this level, we start
2969  * stripping them in writeback.
2970  */
2971 static unsigned long max_buffer_heads __ro_after_init;
2972 
2973 int buffer_heads_over_limit;
2974 
2975 struct bh_accounting {
2976 	int nr;			/* Number of live bh's */
2977 	int ratelimit;		/* Limit cacheline bouncing */
2978 };
2979 
2980 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
2981 
2982 static void recalc_bh_state(void)
2983 {
2984 	int i;
2985 	int tot = 0;
2986 
2987 	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
2988 		return;
2989 	__this_cpu_write(bh_accounting.ratelimit, 0);
2990 	for_each_online_cpu(i)
2991 		tot += per_cpu(bh_accounting, i).nr;
2992 	buffer_heads_over_limit = (tot > max_buffer_heads);
2993 }
2994 
2995 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
2996 {
2997 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
2998 	if (ret) {
2999 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3000 		spin_lock_init(&ret->b_uptodate_lock);
3001 		preempt_disable();
3002 		__this_cpu_inc(bh_accounting.nr);
3003 		recalc_bh_state();
3004 		preempt_enable();
3005 	}
3006 	return ret;
3007 }
3008 EXPORT_SYMBOL(alloc_buffer_head);
3009 
3010 void free_buffer_head(struct buffer_head *bh)
3011 {
3012 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3013 	kmem_cache_free(bh_cachep, bh);
3014 	preempt_disable();
3015 	__this_cpu_dec(bh_accounting.nr);
3016 	recalc_bh_state();
3017 	preempt_enable();
3018 }
3019 EXPORT_SYMBOL(free_buffer_head);
3020 
3021 static int buffer_exit_cpu_dead(unsigned int cpu)
3022 {
3023 	int i;
3024 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3025 
3026 	for (i = 0; i < BH_LRU_SIZE; i++) {
3027 		brelse(b->bhs[i]);
3028 		b->bhs[i] = NULL;
3029 	}
3030 	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3031 	per_cpu(bh_accounting, cpu).nr = 0;
3032 	return 0;
3033 }
3034 
3035 /**
3036  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3037  * @bh: struct buffer_head
3038  *
3039  * Return true if the buffer is up-to-date and false,
3040  * with the buffer locked, if not.
3041  */
3042 int bh_uptodate_or_lock(struct buffer_head *bh)
3043 {
3044 	if (!buffer_uptodate(bh)) {
3045 		lock_buffer(bh);
3046 		if (!buffer_uptodate(bh))
3047 			return 0;
3048 		unlock_buffer(bh);
3049 	}
3050 	return 1;
3051 }
3052 EXPORT_SYMBOL(bh_uptodate_or_lock);
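
/*
 * Example (illustrative sketch): the read-if-needed pattern this enables,
 * which the bh_read() helper in buffer_head.h wraps up.  On success the
 * buffer ends up up to date and unlocked.
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = __bh_read(bh, 0, true);
 *		if (err)
 *			return err;
 *	}
 */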
3053 
3054 /**
3055  * __bh_read - Submit read for a locked buffer
3056  * @bh: struct buffer_head
3057  * @op_flags: extra REQ_* flags to OR in besides REQ_OP_READ
3058  * @wait: wait until the read finishes
3059  *
3060  * Returns zero on success or when not waiting, and -EIO on error.
3061  */
3062 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3063 {
3064 	int ret = 0;
3065 
3066 	BUG_ON(!buffer_locked(bh));
3067 
3068 	get_bh(bh);
3069 	bh->b_end_io = end_buffer_read_sync;
3070 	submit_bh(REQ_OP_READ | op_flags, bh);
3071 	if (wait) {
3072 		wait_on_buffer(bh);
3073 		if (!buffer_uptodate(bh))
3074 			ret = -EIO;
3075 	}
3076 	return ret;
3077 }
3078 EXPORT_SYMBOL(__bh_read);
3079 
3080 /**
3081  * __bh_read_batch - Submit read for a batch of unlocked buffers
3082  * @nr: number of entries in the buffer batch
3083  * @bhs: a batch of struct buffer_head
3084  * @op_flags: extra REQ_* flags to OR in besides REQ_OP_READ
3085  * @force_lock: if set, wait for the lock on each buffer; otherwise skip any
3086  *              buffer that cannot be locked without waiting.
3087  *
3088  * The reads are submitted asynchronously and are not waited for.
3089  */
3090 void __bh_read_batch(int nr, struct buffer_head *bhs[],
3091 		     blk_opf_t op_flags, bool force_lock)
3092 {
3093 	int i;
3094 
3095 	for (i = 0; i < nr; i++) {
3096 		struct buffer_head *bh = bhs[i];
3097 
3098 		if (buffer_uptodate(bh))
3099 			continue;
3100 
3101 		if (force_lock)
3102 			lock_buffer(bh);
3103 		else
3104 			if (!trylock_buffer(bh))
3105 				continue;
3106 
3107 		if (buffer_uptodate(bh)) {
3108 			unlock_buffer(bh);
3109 			continue;
3110 		}
3111 
3112 		bh->b_end_io = end_buffer_read_sync;
3113 		get_bh(bh);
3114 		submit_bh(REQ_OP_READ | op_flags, bh);
3115 	}
3116 }
3117 EXPORT_SYMBOL(__bh_read_batch);
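
/*
 * Example (illustrative sketch): opportunistic readahead of a run of
 * metadata blocks.  Because force_lock is false, buffers someone else holds
 * locked are simply skipped.  "sb" and "first" are assumed to come from the
 * caller.
 *
 *	struct buffer_head *bhs[8];
 *	int i, nr = 0;
 *
 *	for (i = 0; i < 8; i++) {
 *		struct buffer_head *bh = sb_getblk(sb, first + i);
 *
 *		if (bh)
 *			bhs[nr++] = bh;
 *	}
 *	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
 *	for (i = 0; i < nr; i++)
 *		brelse(bhs[i]);
 */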
3118 
3119 void __init buffer_init(void)
3120 {
3121 	unsigned long nrpages;
3122 	int ret;
3123 
3124 	bh_cachep = KMEM_CACHE(buffer_head,
3125 				SLAB_RECLAIM_ACCOUNT|SLAB_PANIC);
3126 	/*
3127 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3128 	 */
3129 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3130 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3131 	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3132 					NULL, buffer_exit_cpu_dead);
3133 	WARN_ON(ret < 0);
3134 }
3135